From a81aae612f409bc767af3013eeccb71226831fc2 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 17 Aug 2011 11:19:34 -0400 Subject: Add modules for task and vm states. --- nova/compute/task_state.py | 28 ++++++++++++++++++++++++++++ nova/compute/vm_state.py | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+) create mode 100644 nova/compute/task_state.py create mode 100644 nova/compute/vm_state.py diff --git a/nova/compute/task_state.py b/nova/compute/task_state.py new file mode 100644 index 000000000..b4dc9af51 --- /dev/null +++ b/nova/compute/task_state.py @@ -0,0 +1,28 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Possible task states for instances""" + +BUILD_BLOCK_DEVICE_MAPPING='block_device_mapping' +NETWORKING='networking' + +PASSWORD='password' + +RESIZE_PREP='resize_prep' +RESIZE_MIGRATING='resize_migrating' +RESIZE_MIGRATED='resize_migrated' +RESIZE_FINISH='resize_finish' diff --git a/nova/compute/vm_state.py b/nova/compute/vm_state.py new file mode 100644 index 000000000..e81cba1f0 --- /dev/null +++ b/nova/compute/vm_state.py @@ -0,0 +1,35 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Possible vm states for instances""" + +BUILD='build' +REBUILD='rebuild' +REBOOT='reboot' +DELETE='delete' +STOP='stop' +START='start' +RESIZE='resize' +VERIFY_RESIZE='verify_resize' +PAUSE='pause' +UNPAUSE='unpause' + +SUSPEND='suspend' +RESUME='resume' + +RESCUE='rescue' +UNRESCUE='unrescue' -- cgit From 5270b0a565ec26d2f7de3a7d95be86433d8c6bd2 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 17 Aug 2011 11:46:28 -0400 Subject: Split set state into vm, task, and power state functions. 
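The three setters added below repeat the same uuid-versus-integer-id
filtering. A minimal sketch of that shared pattern factored into a helper
(hypothetical name, not part of this patch; assumes the module's existing
get_session/models/utils imports):

    def _instance_query(session, instance_id):
        # Build an Instance query keyed by uuid or integer id.
        query = session.query(models.Instance)
        if utils.is_uuid_like(instance_id):
            return query.filter_by(uuid=instance_id)
        return query.filter_by(id=instance_id)

    def instance_set_task_state(context, instance_id, task_state):
        session = get_session()
        _instance_query(session, instance_id).update(
            {'task_state': task_state})
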
--- nova/db/sqlalchemy/api.py | 46 +++++++++++++++++++++++++++++++++++++--------- 1 file changed, 37 insertions(+), 9 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 57a4370d8..07207b8ee 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1439,15 +1439,43 @@ def instance_get_floating_address(context, instance_id): @require_admin_context -def instance_set_state(context, instance_id, state, description=None): - # TODO(devcamcar): Move this out of models and into driver - from nova.compute import power_state - if not description: - description = power_state.name(state) - db.instance_update(context, - instance_id, - {'state': state, - 'state_description': description}) +def instance_set_power_state(context, instance_id, power_state): + session = get_session() + partial = session.query(models.Instance) + + if utils.is_uuid_like(instance_id): + result = partial.filter_by(uuid=instance_id) + else: + result = partial.filter_by(id=instance_id) + + result.update({'power_state': power_state}) + + +@require_admin_context +def instance_set_vm_state(context, instance_id, vm_state): + # vm_state = running, halted, suspended, paused + session = get_session() + partial = session.query(models.Instance) + + if utils.is_uuid_like(instance_id): + result = partial.filter_by(uuid=instance_id) + else: + result = partial.filter_by(id=instance_id) + + result.update({'vm_state': vm_state}) + + +def instance_set_task_state(context, instance_id, task_state): + # task_state = running, halted, suspended, paused + session = get_session() + partial = session.query(models.Instance) + + if utils.is_uuid_like(instance_id): + result = partial.filter_by(uuid=instance_id) + else: + result = partial.filter_by(id=instance_id) + + result.update({'task_state': task_state}) @require_context -- cgit From 6515b115de8cd026ea88aab796d4364ccc2ac4f0 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 17 Aug 2011 11:51:39 -0400 Subject: Pep8 fixes. --- nova/db/sqlalchemy/api.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 07207b8ee..e7d02cb5d 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1442,7 +1442,7 @@ def instance_get_floating_address(context, instance_id): def instance_set_power_state(context, instance_id, power_state): session = get_session() partial = session.query(models.Instance) - + if utils.is_uuid_like(instance_id): result = partial.filter_by(uuid=instance_id) else: @@ -1456,7 +1456,7 @@ def instance_set_vm_state(context, instance_id, vm_state): # vm_state = running, halted, suspended, paused session = get_session() partial = session.query(models.Instance) - + if utils.is_uuid_like(instance_id): result = partial.filter_by(uuid=instance_id) else: @@ -1469,7 +1469,7 @@ def instance_set_task_state(context, instance_id, task_state): # task_state = running, halted, suspended, paused session = get_session() partial = session.query(models.Instance) - + if utils.is_uuid_like(instance_id): result = partial.filter_by(uuid=instance_id) else: -- cgit From bd2e98c064b7c1e9c866f3013e13af7883e11e05 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 17 Aug 2011 13:30:47 -0400 Subject: Initial instance states migration. 
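Note that the migration below only renames columns (and its missing
Column/String imports are fixed later in this series); existing rows keep
their old free-form state_description strings ('running', 'stopped', ...),
which do not match the new vm_state vocabulary. A hypothetical backfill
pass, not part of this patch and with an assumed mapping, could look like:

    _VM_STATE_MAP = {'running': 'active',
                     'stopped': 'stop',
                     'migrating': 'migrate',
                     'terminating': 'delete'}

    def backfill_vm_state(migrate_engine, instances):
        # Translate legacy state_description values, now living in the
        # renamed vm_state column, into the new vocabulary.
        for old, new in _VM_STATE_MAP.items():
            migrate_engine.execute(instances.update()
                                   .where(instances.c.vm_state == old)
                                   .values(vm_state=new))
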
--- .../versions/037_update_instance_states.py | 57 ++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py new file mode 100644 index 000000000..0bbe39def --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py @@ -0,0 +1,57 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table + +meta = MetaData() + +c_task_state = Column('task_state', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_state = instances.c.state + c_state.alter(name='power_state') + + c_vm_state = instances.c.state_description + c_vm_state.alter(name='vm_state') + + instances.create_column(c_task_state) + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_state = instances.c.power_state + c_state.alter(name='state') + + c_vm_state = instances.c.vm_state + c_vm_state.alter(name='state_description') + + instances.drop_column('task_state') -- cgit From 1d1d027554d6be355bd9b52b2d87081d06f05045 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 17 Aug 2011 16:23:40 -0400 Subject: Updated compute manager/API to use vm/task states. Updated vm/task states to cover a few more cases I encountered. 
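For reference, the (vm_state, task_state) pairs this patch sets around
each driver call, distilled from the manager handlers below (begin -> end):

    TRANSITIONS = {
        'reboot':  ((vm_state.REBOOT,  task_state.REBOOTING),  (vm_state.ACTIVE,  None)),
        'pause':   ((vm_state.PAUSE,   task_state.PAUSING),    (vm_state.PAUSE,   None)),
        'unpause': ((vm_state.ACTIVE,  task_state.UNPAUSING),  (vm_state.ACTIVE,  None)),
        'suspend': ((vm_state.SUSPEND, task_state.SUSPENDING), (vm_state.SUSPEND, None)),
        'resume':  ((vm_state.ACTIVE,  task_state.RESUMING),   (vm_state.ACTIVE,  None)),
        'rescue':  ((vm_state.RESCUE,  task_state.RESCUING),   (vm_state.RESCUE,  task_state.RESCUED)),
    }
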
--- nova/compute/api.py | 57 ++++-- nova/compute/manager.py | 441 ++++++++++++++++++++++++--------------------- nova/compute/task_state.py | 17 +- nova/compute/vm_state.py | 8 +- 4 files changed, 296 insertions(+), 227 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index e909e9959..ec760853e 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -36,6 +36,7 @@ from nova import utils from nova import volume from nova.compute import instance_types from nova.compute import power_state +from nova.compute import vm_state from nova.compute.utils import terminate_volumes from nova.scheduler import api as scheduler_api from nova.db import base @@ -74,10 +75,13 @@ def generate_default_hostname(instance): def _is_able_to_shutdown(instance, instance_id): - states = {'terminating': "Instance %s is already being terminated", - 'migrating': "Instance %s is being migrated", - 'stopping': "Instance %s is being stopped"} - msg = states.get(instance['state_description']) + states = { + vm_state.DELETE: "Instance %s is already being terminated", + vm_state.MIGRATE: "Instance %s is being migrated", + vm_state.RESIZE: "Instance %s is being resized", + vm_state.STOP: "Instance %s is being stopped", + } + msg = states.get(instance['vm_state']) if msg: LOG.warning(_(msg), instance_id) return False @@ -231,8 +235,8 @@ class API(base.Base): 'image_ref': image_href, 'kernel_id': kernel_id or '', 'ramdisk_id': ramdisk_id or '', - 'state': 0, - 'state_description': 'scheduling', + 'power_state': power_state.NOSTATE, + 'vm_state': vm_state.BUILD, 'user_id': context.user_id, 'project_id': context.project_id, 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), @@ -648,9 +652,8 @@ class API(base.Base): return self.update(context, - instance['id'], - state_description='terminating', - state=0, + instance_id, + vm_state=vm_state.DELETE, terminated_at=utils.utcnow()) host = instance['host'] @@ -671,9 +674,8 @@ class API(base.Base): return self.update(context, - instance['id'], - state_description='stopping', - state=power_state.NOSTATE, + instance_id, + vm_state=vm_state.STOP, terminated_at=utils.utcnow()) host = instance['host'] @@ -685,12 +687,15 @@ class API(base.Base): """Start an instance.""" LOG.debug(_("Going to try to start %s"), instance_id) instance = self._get_instance(context, instance_id, 'starting') - if instance['state_description'] != 'stopped': - _state_description = instance['state_description'] + vm_state = instance["vm_state"] + + if vm_state != vm_state.STOP: LOG.warning(_("Instance %(instance_id)s is not " - "stopped(%(_state_description)s)") % locals()) + "stopped. (%(vm_state)s)") % locals()) return + self.update(context, instance_id, vm_state=vm_state.ACTIVE) + # TODO(yamahata): injected_files isn't supported right now. # It is used only for osapi. not for ec2 api. # availability_zone isn't used by run_instance. 
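    # NOTE: in the start() hunk above, `vm_state = instance["vm_state"]`
    # rebinds the imported vm_state module to a string, so the comparison
    # `vm_state != vm_state.STOP` cannot consult the module and would
    # raise AttributeError. A corrected sketch of that check (local name
    # changed; otherwise as in the patch):
    current_vm_state = instance["vm_state"]
    if current_vm_state != vm_state.STOP:
        LOG.warning(_("Instance %(instance_id)s is not "
                      "stopped. (%(current_vm_state)s)") % locals())
        return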
@@ -918,6 +923,7 @@ class API(base.Base): @scheduler_api.reroute_compute("reboot") def reboot(self, context, instance_id): """Reboot the given instance.""" + self.update(context, instance_id, vm_state=vm_state.REBOOT) self._cast_compute_message('reboot_instance', context, instance_id) @scheduler_api.reroute_compute("rebuild") @@ -925,8 +931,12 @@ class API(base.Base): metadata=None, files_to_inject=None): """Rebuild the given instance with the provided metadata.""" instance = db.api.instance_get(context, instance_id) + invalid_rebuild_states = [ + vm_state.BUILD, + vm_state.REBUILD, + ] - if instance["state"] == power_state.BUILDING: + if instance["vm_state"] in invalid_rebuild_states: msg = _("Instance already building") raise exception.BuildInProgress(msg) @@ -946,6 +956,8 @@ class API(base.Base): "injected_files": files_to_inject, } + self.update(context, instance_id, vm_state=vm_state.REBUILD) + self._cast_compute_message('rebuild_instance', context, instance_id, @@ -963,6 +975,8 @@ class API(base.Base): raise exception.MigrationNotFoundByStatus(instance_id=instance_id, status='finished') + self.update(context, instance_id, vm_state=vm_state.ACTIVE) + params = {'migration_id': migration_ref['id']} self._cast_compute_message('revert_resize', context, instance_ref['uuid'], @@ -983,6 +997,9 @@ class API(base.Base): if not migration_ref: raise exception.MigrationNotFoundByStatus(instance_id=instance_id, status='finished') + + self.update(context, instance_id, vm_state=vm_state.ACTIVE) + params = {'migration_id': migration_ref['id']} self._cast_compute_message('confirm_resize', context, instance_ref['uuid'], @@ -1028,6 +1045,8 @@ class API(base.Base): if (current_memory_mb == new_memory_mb) and flavor_id: raise exception.CannotResizeToSameSize() + self.update(context, instance_id, vm_state=vm_state.RESIZE) + instance_ref = self._get_instance(context, instance_id, 'resize') self._cast_scheduler_message(context, {"method": "prep_resize", @@ -1061,11 +1080,13 @@ class API(base.Base): @scheduler_api.reroute_compute("pause") def pause(self, context, instance_id): """Pause the given instance.""" + self.update(context, instance_id, vm_state=vm_state.PAUSE) self._cast_compute_message('pause_instance', context, instance_id) @scheduler_api.reroute_compute("unpause") def unpause(self, context, instance_id): """Unpause the given instance.""" + self.update(context, instance_id, vm_state=vm_state.ACTIVE) self._cast_compute_message('unpause_instance', context, instance_id) def set_host_enabled(self, context, host, enabled): @@ -1092,21 +1113,25 @@ class API(base.Base): @scheduler_api.reroute_compute("suspend") def suspend(self, context, instance_id): """Suspend the given instance.""" + self.update(context, instance_id, vm_state=vm_state.SUSPEND) self._cast_compute_message('suspend_instance', context, instance_id) @scheduler_api.reroute_compute("resume") def resume(self, context, instance_id): """Resume the given instance.""" + self.update(context, instance_id, vm_state=vm_state.ACTIVE) self._cast_compute_message('resume_instance', context, instance_id) @scheduler_api.reroute_compute("rescue") def rescue(self, context, instance_id): """Rescue the given instance.""" + self.update(context, instance_id, vm_state=vm_state.RESCUE) self._cast_compute_message('rescue_instance', context, instance_id) @scheduler_api.reroute_compute("unrescue") def unrescue(self, context, instance_id): """Unrescue the given instance.""" + self.update(context, instance_id, vm_state=vm_state.ACTIVE) 
self._cast_compute_message('unrescue_instance', context, instance_id) @scheduler_api.reroute_compute("set_admin_password") diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 3299268f2..34c6bc1ea 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -56,6 +56,8 @@ from nova import rpc from nova import utils from nova import volume from nova.compute import power_state +from nova.compute import task_state +from nova.compute import vm_state from nova.notifier import api as notifier from nova.compute.utils import terminate_volumes from nova.virt import driver @@ -146,6 +148,10 @@ class ComputeManager(manager.SchedulerDependentManager): super(ComputeManager, self).__init__(service_name="compute", *args, **kwargs) + def _instance_update(self, context, instance_id, **kwargs): + """Update an instance in the database using kwargs as value.""" + return self.db.instance_update(context, instance_id, kwargs) + def init_host(self): """Initialization for a standalone compute service.""" self.driver.init_host(host=self.host) @@ -153,8 +159,8 @@ class ComputeManager(manager.SchedulerDependentManager): instances = self.db.instance_get_all_by_host(context, self.host) for instance in instances: inst_name = instance['name'] - db_state = instance['state'] - drv_state = self._update_state(context, instance['id']) + db_state = instance['power_state'] + drv_state = self._get_power_state(context, instance) expect_running = db_state == power_state.RUNNING \ and drv_state != db_state @@ -177,34 +183,13 @@ class ComputeManager(manager.SchedulerDependentManager): LOG.warning(_('Hypervisor driver does not ' 'support firewall rules')) - def _update_state(self, context, instance_id, state=None): - """Update the state of an instance from the driver info.""" - instance_ref = self.db.instance_get(context, instance_id) - - if state is None: - try: - LOG.debug(_('Checking state of %s'), instance_ref['name']) - info = self.driver.get_info(instance_ref['name']) - except exception.NotFound: - info = None - - if info is not None: - state = info['state'] - else: - state = power_state.FAILED - - self.db.instance_set_state(context, instance_id, state) - return state - - def _update_launched_at(self, context, instance_id, launched_at=None): - """Update the launched_at parameter of the given instance.""" - data = {'launched_at': launched_at or utils.utcnow()} - self.db.instance_update(context, instance_id, data) - - def _update_image_ref(self, context, instance_id, image_ref): - """Update the image_id for the given instance.""" - data = {'image_ref': image_ref} - self.db.instance_update(context, instance_id, data) + def _get_power_state(self, context, instance): + """Retrieve the power state for the given instance.""" + LOG.debug(_('Checking state of %s'), instance['name']) + try: + return self.driver.get_info(instance['name'])["state"] + except exception.NotFound: + return power_state.FAILED def get_console_topic(self, context, **kwargs): """Retrieves the console host for a project on this host. 
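    # Usage sketch for the two helpers added above (a sketch, not part of
    # this patch): the hypervisor is authoritative for power_state, while
    # vm_state/task_state live only in the database.
    current_power_state = self._get_power_state(context, instance)
    self._instance_update(context, instance['id'],
                          power_state=current_power_state,
                          vm_state=vm_state.ACTIVE,
                          task_state=None)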
@@ -388,13 +373,10 @@ class ComputeManager(manager.SchedulerDependentManager): # NOTE(vish): used by virt but not in database updates['injected_files'] = kwargs.get('injected_files', []) updates['admin_pass'] = kwargs.get('admin_password', None) - instance = self.db.instance_update(context, - instance_id, - updates) - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'networking') + updates['vm_state'] = vm_state.BUILD + updates['task_state'] = task_state.NETWORKING + + instance = self.db.instance_update(context, instance_id, updates) is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id) try: @@ -413,6 +395,11 @@ class ComputeManager(manager.SchedulerDependentManager): # all vif creation and network injection, maybe this is correct network_info = [] + self._instance_update(context, + instance_id, + vm_state=vm_state.BUILD, + task_state=task_state.BLOCK_DEVICE_MAPPING) + (swap, ephemerals, block_device_mapping) = self._setup_block_device_mapping( context, instance_id) @@ -422,9 +409,11 @@ class ComputeManager(manager.SchedulerDependentManager): 'ephemerals': ephemerals, 'block_device_mapping': block_device_mapping} - # TODO(vish) check to make sure the availability zone matches - self._update_state(context, instance_id, power_state.BUILDING) + self._instance_update(context, + instance_id, + task_state=task_state.SPAWN) + # TODO(vish) check to make sure the availability zone matches try: self.driver.spawn(context, instance, network_info, block_device_info) @@ -433,13 +422,21 @@ class ComputeManager(manager.SchedulerDependentManager): "virtualization enabled in the BIOS? Details: " "%(ex)s") % locals() LOG.exception(msg) + return + + current_power_state = self._get_power_state(context, instance) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.ACTIVE, + task_state=None, + launched_at=utils.utcnow()) - self._update_launched_at(context, instance_id) - self._update_state(context, instance_id) usage_info = utils.usage_from_instance(instance) notifier.notify('compute.%s' % self.host, 'compute.instance.create', notifier.INFO, usage_info) + except exception.InstanceNotFound: # FIXME(wwolf): We are just ignoring InstanceNotFound # exceptions here in case the instance was immediately @@ -523,11 +520,22 @@ class ComputeManager(manager.SchedulerDependentManager): instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_("Rebuilding instance %s"), instance_id, context=context) - self._update_state(context, instance_id, power_state.BUILDING) + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.REBUILD, + task_state=task_state.REBUILDING) network_info = self._get_instance_nw_info(context, instance_ref) - self.driver.destroy(instance_ref, network_info) + + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.REBUILD, + task_state=task_state.SPAWN) + image_ref = kwargs.get('image_ref') instance_ref.image_ref = image_ref instance_ref.injected_files = kwargs.get('injected_files', []) @@ -536,9 +544,15 @@ class ComputeManager(manager.SchedulerDependentManager): bd_mapping = self._setup_block_device_mapping(context, instance_id) self.driver.spawn(context, instance_ref, network_info, bd_mapping) - self._update_image_ref(context, instance_id, image_ref) - self._update_launched_at(context, instance_id) - self._update_state(context, instance_id) + 
current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.ACTIVE, + task_state=None, + image_ref=image_ref, + launched_at=utils.utcnow()) + usage_info = utils.usage_from_instance(instance_ref, image_ref=image_ref) notifier.notify('compute.%s' % self.host, @@ -550,26 +564,34 @@ class ComputeManager(manager.SchedulerDependentManager): @checks_instance_lock def reboot_instance(self, context, instance_id): """Reboot an instance on this host.""" + LOG.audit(_("Rebooting instance %s"), instance_id, context=context) context = context.elevated() - self._update_state(context, instance_id) instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_("Rebooting instance %s"), instance_id, context=context) - if instance_ref['state'] != power_state.RUNNING: - state = instance_ref['state'] + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.REBOOT, + task_state=task_state.REBOOTING) + + if instance_ref['power_state'] != power_state.RUNNING: + state = instance_ref['power_state'] running = power_state.RUNNING LOG.warn(_('trying to reboot a non-running ' 'instance: %(instance_id)s (state: %(state)s ' 'expected: %(running)s)') % locals(), context=context) - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'rebooting') network_info = self._get_instance_nw_info(context, instance_ref) self.driver.reboot(instance_ref, network_info) - self._update_state(context, instance_id) + + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.ACTIVE, + task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def snapshot_instance(self, context, instance_id, image_id, @@ -585,37 +607,41 @@ class ComputeManager(manager.SchedulerDependentManager): :param rotation: int representing how many backups to keep around; None if rotation shouldn't be used (as in the case of snapshots) """ + if image_type != "snapshot" and image_type != "backup": + raise Exception(_('Image type not recognized %s') % image_type) + context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - #NOTE(sirp): update_state currently only refreshes the state field - # if we add is_snapshotting, we will need this refreshed too, - # potentially? 
- self._update_state(context, instance_id) + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.ACTIVE, + task_state=image_type) LOG.audit(_('instance %s: snapshotting'), instance_id, context=context) - if instance_ref['state'] != power_state.RUNNING: - state = instance_ref['state'] + + if instance_ref['power_state'] != power_state.RUNNING: + state = instance_ref['power_state'] running = power_state.RUNNING LOG.warn(_('trying to snapshot a non-running ' 'instance: %(instance_id)s (state: %(state)s ' 'expected: %(running)s)') % locals()) self.driver.snapshot(context, instance_ref, image_id) + self._instance_update(context, instance_id, task_state=None) + + if image_type == 'snapshot' and rotation: + raise exception.ImageRotationNotAllowed() + + elif image_type == 'backup' and rotation: + instance_uuid = instance_ref['uuid'] + self.rotate_backups(context, instance_uuid, backup_type, rotation) - if image_type == 'snapshot': - if rotation: - raise exception.ImageRotationNotAllowed() elif image_type == 'backup': - if rotation: - instance_uuid = instance_ref['uuid'] - self.rotate_backups(context, instance_uuid, backup_type, - rotation) - else: - raise exception.RotationRequiredForBackup() - else: - raise Exception(_('Image type not recognized %s') % image_type) + raise exception.RotationRequiredForBackup() def rotate_backups(self, context, instance_uuid, backup_type, rotation): """Delete excess backups associated to an instance. @@ -751,40 +777,51 @@ class ComputeManager(manager.SchedulerDependentManager): @checks_instance_lock def rescue_instance(self, context, instance_id): """Rescue an instance on this host.""" + LOG.audit(_('instance %s: rescuing'), instance_id, context=context) context = context.elevated() + + self._instance_update(context, + instance_id, + vm_state=vm_state.RESCUE, + task_state=task_state.RESCUING) + instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_('instance %s: rescuing'), instance_id, context=context) - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'rescuing') - _update_state = lambda result: self._update_state_callback( - self, context, instance_id, result) network_info = self._get_instance_nw_info(context, instance_ref) - self.driver.rescue(context, instance_ref, _update_state, network_info) - self._update_state(context, instance_id) + + # NOTE(blamar): None of the virt drivers use the 'callback' param + self.driver.rescue(context, instance_ref, None, network_info) + + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + vm_state=vm_state.RESCUE, + task_state=task_state.RESCUED, + power_state=current_power_state) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def unrescue_instance(self, context, instance_id): """Rescue an instance on this host.""" + LOG.audit(_('instance %s: unrescuing'), instance_id, context=context) context = context.elevated() + + self._instance_update(context, + instance_id, + vm_state=vm_state.ACTIVE, + task_state=task_state.UNRESCUING) + instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_('instance %s: unrescuing'), instance_id, context=context) - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'unrescuing') - _update_state = lambda result: self._update_state_callback( - self, context, instance_id, result) network_info 
= self._get_instance_nw_info(context, instance_ref) - self.driver.unrescue(instance_ref, _update_state, network_info) - self._update_state(context, instance_id) - @staticmethod - def _update_state_callback(self, context, instance_id, result): - """Update instance state when async task completes.""" - self._update_state(context, instance_id) + # NOTE(blamar): None of the virt drivers use the 'callback' param + self.driver.unrescue(instance_ref, None, network_info) + + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + vm_state=vm_state.ACTIVE, + task_state=None, + power_state=current_power_state) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock @@ -843,11 +880,12 @@ class ComputeManager(manager.SchedulerDependentManager): # Just roll back the record. There's no need to resize down since # the 'old' VM already has the preferred attributes - self.db.instance_update(context, instance_ref['uuid'], - dict(memory_mb=instance_type['memory_mb'], - vcpus=instance_type['vcpus'], - local_gb=instance_type['local_gb'], - instance_type_id=instance_type['id'])) + self._instance_update(context, + instance_ref["uuid"], + memory_mb=instance_type['memory_mb'], + vcpus=instance_type['vcpus'], + local_gb=instance_type['local_gb'], + instance_type_id=instance_type['id']) self.driver.revert_migration(instance_ref) self.db.migration_update(context, migration_id, @@ -1000,35 +1038,45 @@ class ComputeManager(manager.SchedulerDependentManager): @checks_instance_lock def pause_instance(self, context, instance_id): """Pause an instance on this host.""" + LOG.audit(_('instance %s: pausing'), instance_id, context=context) context = context.elevated() + + self._instance_update(context, + instance_id, + vm_state=vm_state.PAUSE, + task_state=task_state.PAUSING) + instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_('instance %s: pausing'), instance_id, context=context) - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'pausing') - self.driver.pause(instance_ref, - lambda result: self._update_state_callback(self, - context, - instance_id, - result)) + self.driver.pause(instance_ref, lambda result: None) + + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.PAUSE, + task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def unpause_instance(self, context, instance_id): """Unpause a paused instance on this host.""" + LOG.audit(_('instance %s: unpausing'), instance_id, context=context) context = context.elevated() + + self._instance_update(context, + instance_id, + vm_state=vm_state.ACTIVE, + task_state=task_state.UNPAUSING) + instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_('instance %s: unpausing'), instance_id, context=context) - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'unpausing') - self.driver.unpause(instance_ref, - lambda result: self._update_state_callback(self, - context, - instance_id, - result)) + self.driver.unpause(instance_ref, lambda result: None) + + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.ACTIVE, + task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def 
host_power_action(self, context, host=None, action=None): @@ -1053,33 +1101,45 @@ class ComputeManager(manager.SchedulerDependentManager): @checks_instance_lock def suspend_instance(self, context, instance_id): """Suspend the given instance.""" + LOG.audit(_('instance %s: suspending'), instance_id, context=context) context = context.elevated() + + self._instance_update(context, + instance_id, + vm_state=vm_state.SUSPEND, + task_state=task_state.SUSPENDING) + instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_('instance %s: suspending'), instance_id, context=context) - self.db.instance_set_state(context, instance_id, - power_state.NOSTATE, - 'suspending') - self.driver.suspend(instance_ref, - lambda result: self._update_state_callback(self, - context, - instance_id, - result)) + self.driver.suspend(instance_ref, lambda result: None) + + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.SUSPEND, + task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def resume_instance(self, context, instance_id): """Resume the given suspended instance.""" + LOG.audit(_('instance %s: resuming'), instance_id, context=context) context = context.elevated() + + self._instance_update(context, + instance_id, + vm_state=vm_state.ACTIVE, + task_state=task_state.RESUMING) + instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_('instance %s: resuming'), instance_id, context=context) - self.db.instance_set_state(context, instance_id, - power_state.NOSTATE, - 'resuming') - self.driver.resume(instance_ref, - lambda result: self._update_state_callback(self, - context, - instance_id, - result)) + self.driver.resume(instance_ref, lambda result: None) + + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.ACTIVE, + task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def lock_instance(self, context, instance_id): @@ -1489,11 +1549,14 @@ class ComputeManager(manager.SchedulerDependentManager): 'block_migration': block_migration}}) # Restore instance state - self.db.instance_update(ctxt, - instance_ref['id'], - {'state_description': 'running', - 'state': power_state.RUNNING, - 'host': dest}) + current_power_state = self._get_power_state(ctxt, instance_ref) + self._instance_update(ctxt, + instance_ref["id"], + host=dest, + power_state=current_power_state, + vm_state=vm_state.ACTIVE, + task_state=None) + # Restore volume state for volume_ref in instance_ref['volumes']: volume_id = volume_ref['id'] @@ -1539,11 +1602,11 @@ class ComputeManager(manager.SchedulerDependentManager): This param specifies destination host. 
""" host = instance_ref['host'] - self.db.instance_update(context, - instance_ref['id'], - {'state_description': 'running', - 'state': power_state.RUNNING, - 'host': host}) + self._instance_update(context, + instance_ref['id'], + host=host, + vm_state=vm_state.ACTIVE, + task_state=None) for volume_ref in instance_ref['volumes']: volume_id = volume_ref['id'] @@ -1591,10 +1654,9 @@ class ComputeManager(manager.SchedulerDependentManager): error_list.append(ex) try: - self._poll_instance_states(context) + self._sync_power_states(context) except Exception as ex: - LOG.warning(_("Error during instance poll: %s"), - unicode(ex)) + LOG.warning(_("Error during power_state sync: %s"), unicode(ex)) error_list.append(ex) return error_list @@ -1609,68 +1671,39 @@ class ComputeManager(manager.SchedulerDependentManager): self.update_service_capabilities( self.driver.get_host_stats(refresh=True)) - def _poll_instance_states(self, context): - vm_instances = self.driver.list_instances_detail() - vm_instances = dict((vm.name, vm) for vm in vm_instances) + def _sync_power_states(self, context): + """Align power states between the database and the hypervisor. - # Keep a list of VMs not in the DB, cross them off as we find them - vms_not_found_in_db = list(vm_instances.keys()) + The hypervisor is authoritative for the power_state data, so we + simply loop over all known instances for this host and update the + power_state according to the hypervisor. If the instance is not found + then it will be set to power_state.NOSTATE, because it doesn't exist + on the hypervisor. + """ + vm_instances = self.driver.list_instances_detail() db_instances = self.db.instance_get_all_by_host(context, self.host) + num_vm_instances = len(vm_instances) + num_db_instances = len(db_instances) + + if num_vm_instances != num_db_instances: + LOG.info(_("Found %(num_db_instances)s in the database and " + "%(num_vm_instances)s on the hypervisor.") % locals()) + for db_instance in db_instances: - name = db_instance['name'] - db_state = db_instance['state'] + name = db_instance["name"] + db_power_state = db_instance['power_state'] vm_instance = vm_instances.get(name) if vm_instance is None: - # NOTE(justinsb): We have to be very careful here, because a - # concurrent operation could be in progress (e.g. a spawn) - if db_state == power_state.BUILDING: - # TODO(justinsb): This does mean that if we crash during a - # spawn, the machine will never leave the spawning state, - # but this is just the way nova is; this function isn't - # trying to correct that problem. - # We could have a separate task to correct this error. - # TODO(justinsb): What happens during a live migration? - LOG.info(_("Found instance '%(name)s' in DB but no VM. " - "State=%(db_state)s, so assuming spawn is in " - "progress.") % locals()) - vm_state = db_state - else: - LOG.info(_("Found instance '%(name)s' in DB but no VM. " - "State=%(db_state)s, so setting state to " - "shutoff.") % locals()) - vm_state = power_state.SHUTOFF - if db_instance['state_description'] == 'stopping': - self.db.instance_stop(context, db_instance['id']) - continue + vm_power_state = power_state.NOSTATE else: - vm_state = vm_instance.state - vms_not_found_in_db.remove(name) - - if (db_instance['state_description'] in ['migrating', 'stopping']): - # A situation which db record exists, but no instance" - # sometimes occurs while live-migration at src compute, - # this case should be ignored. 
- LOG.debug(_("Ignoring %(name)s, as it's currently being " - "migrated.") % locals()) - continue - - if vm_state != db_state: - LOG.info(_("DB/VM state mismatch. Changing state from " - "'%(db_state)s' to '%(vm_state)s'") % locals()) - self._update_state(context, db_instance['id'], vm_state) + vm_power_state = vm_instance["power_state"] - # NOTE(justinsb): We no longer auto-remove SHUTOFF instances - # It's quite hard to get them back when we do. - - # Are there VMs not in the DB? - for vm_not_found_in_db in vms_not_found_in_db: - name = vm_not_found_in_db + if vm_power_state == db_power_state: + continue - # We only care about instances that compute *should* know about - if name.startswith("instance-"): - # TODO(justinsb): What to do here? Adopt it? Shut it down? - LOG.warning(_("Found VM not in DB: '%(name)s'. Ignoring") - % locals()) + self._instance_update(context, + db_instance["id"], + power_state=vm_power_state) diff --git a/nova/compute/task_state.py b/nova/compute/task_state.py index b4dc9af51..55466c783 100644 --- a/nova/compute/task_state.py +++ b/nova/compute/task_state.py @@ -17,12 +17,27 @@ """Possible task states for instances""" -BUILD_BLOCK_DEVICE_MAPPING='block_device_mapping' +BLOCK_DEVICE_MAPPING='block_device_mapping' NETWORKING='networking' +SPAWN='spawn' +SNAPSHOT='snapshot' +BACKUP='backup' PASSWORD='password' RESIZE_PREP='resize_prep' RESIZE_MIGRATING='resize_migrating' RESIZE_MIGRATED='resize_migrated' RESIZE_FINISH='resize_finish' + +REBUILDING='rebuilding' + +REBOOTING='rebooting' +PAUSING='pausing' +UNPAUSING='unpausing' +SUSPENDING='suspending' +RESUMING='resuming' + +RESCUING='rescuing' +RESCUED='rescued' +UNRESCUING='unrescuing' diff --git a/nova/compute/vm_state.py b/nova/compute/vm_state.py index e81cba1f0..a1bca6ef4 100644 --- a/nova/compute/vm_state.py +++ b/nova/compute/vm_state.py @@ -17,19 +17,15 @@ """Possible vm states for instances""" +ACTIVE='active' BUILD='build' REBUILD='rebuild' REBOOT='reboot' DELETE='delete' STOP='stop' -START='start' +MIGRATE='migrate' RESIZE='resize' VERIFY_RESIZE='verify_resize' PAUSE='pause' -UNPAUSE='unpause' - SUSPEND='suspend' -RESUME='resume' - RESCUE='rescue' -UNRESCUE='unrescue' -- cgit From cab13dbfd652d1fcf9443e796e50f7eb374fc3fc Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 18 Aug 2011 12:34:01 -0400 Subject: Updated a number of items to pave the way for new states. 
--- nova/compute/manager.py | 31 +++++++++++----------- nova/db/sqlalchemy/api.py | 4 +-- .../versions/037_update_instance_states.py | 2 +- nova/db/sqlalchemy/models.py | 16 +++-------- nova/scheduler/driver.py | 11 ++++---- nova/tests/scheduler/test_scheduler.py | 13 ++++++--- nova/tests/test_compute.py | 29 ++++++++++---------- 7 files changed, 51 insertions(+), 55 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 34c6bc1ea..cb19a19cc 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -241,11 +241,6 @@ class ComputeManager(manager.SchedulerDependentManager): def _setup_block_device_mapping(self, context, instance_id): """setup volumes for block device mapping""" - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'block_device_mapping') - volume_api = volume.API() block_device_mapping = [] swap = None @@ -472,8 +467,7 @@ class ComputeManager(manager.SchedulerDependentManager): for volume in volumes: self._detach_volume(context, instance_id, volume['id'], False) - if (instance['state'] == power_state.SHUTOFF and - instance['state_description'] != 'stopped'): + if instance['power_state'] == power_state.SHUTOFF: self.db.instance_destroy(context, instance_id) raise exception.Error(_('trying to destroy already destroyed' ' instance: %s') % instance_id) @@ -532,16 +526,22 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - power_state=current_power_state, vm_state=vm_state.REBUILD, - task_state=task_state.SPAWN) + task_state=task_state.BLOCK_DEVICE_MAPPING) + + bd_mapping = self._setup_block_device_mapping(context, instance_id) image_ref = kwargs.get('image_ref') instance_ref.image_ref = image_ref instance_ref.injected_files = kwargs.get('injected_files', []) network_info = self.network_api.get_instance_nw_info(context, - instance_ref) - bd_mapping = self._setup_block_device_mapping(context, instance_id) + instance_ref) + + self._instance_update(context, + instance_id, + vm_state=vm_state.REBUILD, + task_state=task_state.SPAWN) + self.driver.spawn(context, instance_ref, network_info, bd_mapping) current_power_state = self._get_power_state(context, instance_ref) @@ -709,7 +709,7 @@ class ComputeManager(manager.SchedulerDependentManager): for i in xrange(max_tries): instance_ref = self.db.instance_get(context, instance_id) instance_id = instance_ref["id"] - instance_state = instance_ref["state"] + instance_state = instance_ref["power_state"] expected_state = power_state.RUNNING if instance_state != expected_state: @@ -744,7 +744,7 @@ class ComputeManager(manager.SchedulerDependentManager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) instance_id = instance_ref['id'] - instance_state = instance_ref['state'] + instance_state = instance_ref['power_state'] expected_state = power_state.RUNNING if instance_state != expected_state: LOG.warn(_('trying to inject a file into a non-running ' @@ -762,7 +762,7 @@ class ComputeManager(manager.SchedulerDependentManager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) instance_id = instance_ref['id'] - instance_state = instance_ref['state'] + instance_state = instance_ref['power_state'] expected_state = power_state.RUNNING if instance_state != expected_state: LOG.warn(_('trying to update agent on a non-running ' @@ -1092,7 +1092,7 @@ class ComputeManager(manager.SchedulerDependentManager): def get_diagnostics(self, context, instance_id): """Retrieve 
diagnostics for an instance on this host.""" instance_ref = self.db.instance_get(context, instance_id) - if instance_ref["state"] == power_state.RUNNING: + if instance_ref["power_state"] == power_state.RUNNING: LOG.audit(_("instance %s: retrieving diagnostics"), instance_id, context=context) return self.driver.get_diagnostics(instance_ref) @@ -1682,6 +1682,7 @@ class ComputeManager(manager.SchedulerDependentManager): """ vm_instances = self.driver.list_instances_detail() + vm_instances = dict((vm.name, vm) for vm in vm_instances) db_instances = self.db.instance_get_all_by_host(context, self.host) num_vm_instances = len(vm_instances) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index e7d02cb5d..67736dea2 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1079,8 +1079,8 @@ def instance_stop(context, instance_id): session.query(models.Instance).\ filter_by(id=instance_id).\ update({'host': None, - 'state': power_state.SHUTOFF, - 'state_description': 'stopped', + 'vm_state': vm_state.STOP, + 'task_state': None, 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(instance_id=instance_id).\ diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py index 0bbe39def..07efbf90f 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py @@ -14,7 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import MetaData, Table +from sqlalchemy import MetaData, Table, Column, String meta = MetaData() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index f2a4680b0..d2987cacc 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -193,8 +193,9 @@ class Instance(BASE, NovaBase): key_name = Column(String(255)) key_data = Column(Text) - state = Column(Integer) - state_description = Column(String(255)) + power_state = Column(Integer) + vm_state = Column(String(255)) + task_state = Column(String(255)) memory_mb = Column(Integer) vcpus = Column(Integer) @@ -232,17 +233,6 @@ class Instance(BASE, NovaBase): root_device_name = Column(String(255)) - # TODO(vish): see Ewan's email about state improvements, probably - # should be in a driver base class or some such - # vmstate_state = running, halted, suspended, paused - # power_state = what we have - # task_state = transitory and may trigger power state transition - - #@validates('state') - #def validate_state(self, key, state): - # assert(state in ['nostate', 'running', 'blocked', 'paused', - # 'shutdown', 'shutoff', 'crashed']) - class InstanceActions(BASE, NovaBase): """Represents a guest VM's actions and results""" diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index f28353f05..b788b996f 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -30,6 +30,8 @@ from nova import log as logging from nova import rpc from nova import utils from nova.compute import power_state +from nova.compute import task_state +from nova.compute import vm_state from nova.api.ec2 import ec2utils @@ -104,10 +106,8 @@ class Scheduler(object): dest, block_migration) # Changing instance_state. 
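    # NOTE: just below, the live-migration "is it running?" check is
    # reduced to power_state alone, where the old version also required
    # state_description == 'running'. An equally strict check under the
    # new model would be, roughly (a sketch, not part of this patch):
    if (instance_ref['power_state'] != power_state.RUNNING or
            instance_ref['vm_state'] != vm_state.ACTIVE):
        raise exception.InstanceNotRunning(instance_id=instance_id)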
- db.instance_set_state(context, - instance_id, - power_state.PAUSED, - 'migrating') + values = {"vm_state": vm_state.MIGRATE} + db.instance_update(context, instance_id, values) # Changing volume state for volume_ref in instance_ref['volumes']: @@ -129,8 +129,7 @@ class Scheduler(object): """ # Checking instance is running. - if (power_state.RUNNING != instance_ref['state'] or \ - 'running' != instance_ref['state_description']): + if instance_ref['power_state'] != power_state.RUNNING: instance_id = ec2utils.id_to_ec2_id(instance_ref['id']) raise exception.InstanceNotRunning(instance_id=instance_id) diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 158df2a27..1b5e131c9 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -40,6 +40,7 @@ from nova.scheduler import driver from nova.scheduler import manager from nova.scheduler import multi from nova.compute import power_state +from nova.compute import vm_state FLAGS = flags.FLAGS @@ -94,6 +95,9 @@ class SchedulerTestCase(test.TestCase): inst['vcpus'] = kwargs.get('vcpus', 1) inst['memory_mb'] = kwargs.get('memory_mb', 10) inst['local_gb'] = kwargs.get('local_gb', 20) + inst['vm_state'] = kwargs.get('vm_state', vm_state.ACTIVE) + inst['power_state'] = kwargs.get('power_state', power_state.RUNNING) + inst['task_state'] = kwargs.get('task_state', None) return db.instance_create(ctxt, inst) def test_fallback(self): @@ -271,8 +275,9 @@ class SimpleDriverTestCase(test.TestCase): inst['memory_mb'] = kwargs.get('memory_mb', 20) inst['local_gb'] = kwargs.get('local_gb', 30) inst['launched_on'] = kwargs.get('launghed_on', 'dummy') - inst['state_description'] = kwargs.get('state_description', 'running') - inst['state'] = kwargs.get('state', power_state.RUNNING) + inst['vm_state'] = kwargs.get('vm_state', vm_state.ACTIVE) + inst['task_state'] = kwargs.get('task_state', None) + inst['power_state'] = kwargs.get('power_state', power_state.RUNNING) return db.instance_create(self.context, inst)['id'] def _create_volume(self): @@ -664,14 +669,14 @@ class SimpleDriverTestCase(test.TestCase): block_migration=False) i_ref = db.instance_get(self.context, instance_id) - self.assertTrue(i_ref['state_description'] == 'migrating') + self.assertTrue(i_ref['vm_state'] == vm_state.MIGRATE) db.instance_destroy(self.context, instance_id) db.volume_destroy(self.context, v_ref['id']) def test_live_migration_src_check_instance_not_running(self): """The instance given by instance_id is not running.""" - instance_id = self._create_instance(state_description='migrating') + instance_id = self._create_instance(power_state=power_state.NOSTATE) i_ref = db.instance_get(self.context, instance_id) try: diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index e2fa3b140..f310eaff6 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -23,6 +23,7 @@ from nova import compute from nova.compute import instance_types from nova.compute import manager as compute_manager from nova.compute import power_state +from nova.compute import vm_state from nova import context from nova import db from nova.db.sqlalchemy import models @@ -747,8 +748,8 @@ class ComputeTestCase(test.TestCase): 'block_migration': False, 'disk': None}}).\ AndRaise(rpc.RemoteError('', '', '')) - dbmock.instance_update(c, i_ref['id'], {'state_description': 'running', - 'state': power_state.RUNNING, + dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_state.ACTIVE, + 'task_state': None, 'host': 
i_ref['host']}) for v in i_ref['volumes']: dbmock.volume_update(c, v['id'], {'status': 'in-use'}) @@ -779,8 +780,8 @@ class ComputeTestCase(test.TestCase): 'block_migration': False, 'disk': None}}).\ AndRaise(rpc.RemoteError('', '', '')) - dbmock.instance_update(c, i_ref['id'], {'state_description': 'running', - 'state': power_state.RUNNING, + dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_state.ACTIVE, + 'task_state': None, 'host': i_ref['host']}) self.compute.db = dbmock @@ -825,8 +826,8 @@ class ComputeTestCase(test.TestCase): c = context.get_admin_context() instance_id = self._create_instance() i_ref = db.instance_get(c, instance_id) - db.instance_update(c, i_ref['id'], {'state_description': 'migrating', - 'state': power_state.PAUSED}) + db.instance_update(c, i_ref['id'], {'vm_state': vm_state.MIGRATE, + 'power_state': power_state.PAUSED}) v_ref = db.volume_create(c, {'size': 1, 'instance_id': instance_id}) fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1', 'instance_id': instance_id}) @@ -887,7 +888,7 @@ class ComputeTestCase(test.TestCase): instances = db.instance_get_all(context.get_admin_context()) LOG.info(_("After force-killing instances: %s"), instances) self.assertEqual(len(instances), 1) - self.assertEqual(power_state.SHUTOFF, instances[0]['state']) + self.assertEqual(power_state.NOSTATE, instances[0]['power_state']) def test_get_all_by_name_regexp(self): """Test searching instances by name (display_name)""" @@ -1307,25 +1308,25 @@ class ComputeTestCase(test.TestCase): """Test searching instances by state""" c = context.get_admin_context() - instance_id1 = self._create_instance({'state': power_state.SHUTDOWN}) + instance_id1 = self._create_instance({'power_state': power_state.SHUTDOWN}) instance_id2 = self._create_instance({ 'id': 2, - 'state': power_state.RUNNING}) + 'power_state': power_state.RUNNING}) instance_id3 = self._create_instance({ 'id': 10, - 'state': power_state.RUNNING}) + 'power_state': power_state.RUNNING}) instances = self.compute_api.get_all(c, - search_opts={'state': power_state.SUSPENDED}) + search_opts={'power_state': power_state.SUSPENDED}) self.assertEqual(len(instances), 0) instances = self.compute_api.get_all(c, - search_opts={'state': power_state.SHUTDOWN}) + search_opts={'power_state': power_state.SHUTDOWN}) self.assertEqual(len(instances), 1) self.assertEqual(instances[0].id, instance_id1) instances = self.compute_api.get_all(c, - search_opts={'state': power_state.RUNNING}) + search_opts={'power_state': power_state.RUNNING}) self.assertEqual(len(instances), 2) instance_ids = [instance.id for instance in instances] self.assertTrue(instance_id2 in instance_ids) @@ -1333,7 +1334,7 @@ class ComputeTestCase(test.TestCase): # Test passing a list as search arg instances = self.compute_api.get_all(c, - search_opts={'state': [power_state.SHUTDOWN, + search_opts={'power_state': [power_state.SHUTDOWN, power_state.RUNNING]}) self.assertEqual(len(instances), 3) -- cgit From 805c1cec609b39ee5a0ba1517bf2f1d41e0c4fa9 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Thu, 18 Aug 2011 15:05:35 -0700 Subject: allow specification of key pair/security group info via metadata extract metadata about keypair / security group configuration from server metadata sent on create. This allows users to use these extensions with their existing api implementations. 
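For illustration, a create request under this metadata approach might look
like (field values hypothetical):

    body = {'server': {'name': 'vm1',
                       'imageRef': '1',
                       'flavorRef': '1',
                       'metadata': {'key_name': 'mykey',
                                    'security_group': 'default'}}}
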
Also remove the code that choose the first key pair in the tenant - since it seems to have been used during the development of os api --- nova/api/openstack/create_instance_helper.py | 21 ++++++++------------- nova/api/openstack/servers.py | 1 - 2 files changed, 8 insertions(+), 14 deletions(-) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index b4a08dac0..031b06921 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -73,20 +73,15 @@ class CreateInstanceHelper(object): if not 'server' in body: raise exc.HTTPUnprocessableEntity() - server_dict = body['server'] context = req.environ['nova.context'] + server_dict = body['server'] + metadata = server_dict.get('metadata', {}) password = self.controller._get_server_admin_password(server_dict) - key_name = None - key_data = None - # TODO(vish): Key pair access should move into a common library - # instead of being accessed directly from the db. - key_pairs = db.key_pair_get_all_by_user(context.elevated(), - context.user_id) - if key_pairs: - key_pair = key_pairs[0] - key_name = key_pair['name'] - key_data = key_pair['public_key'] + # NOTE(ja): extract key_name and security_group from metadata + # to use in os extensions for firewall & keypairs + key_name = metadata.get('key_name') + security_group = metadata.get('security_group') image_href = self.controller._image_ref_from_req_data(body) # If the image href was generated by nova api, strip image_href @@ -155,8 +150,8 @@ class CreateInstanceHelper(object): display_name=name, display_description=name, key_name=key_name, - key_data=key_data, - metadata=server_dict.get('metadata', {}), + security_group=security_group, + metadata=metadata, injected_files=injected_files, admin_password=password, zone_blob=zone_blob, diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 335ecad86..2cf4e3eda 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -146,7 +146,6 @@ class Controller(object): def create(self, req, body): """ Creates a new server for a given user """ extra_values = None - result = None extra_values, instances = self.helper.create_instance( req, body, self.compute_api.create) -- cgit From bbe414cba5d389b553fb3122a3a7dbc9c6d898f2 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Thu, 18 Aug 2011 16:26:32 -0700 Subject: follow same pattern as userdata (not metadata apporach) --- nova/api/openstack/create_instance_helper.py | 27 +++++++++++++-------------- nova/api/openstack/views/servers.py | 3 +++ 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index e64a076c8..c2e9e76ac 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -75,13 +75,15 @@ class CreateInstanceHelper(object): context = req.environ['nova.context'] server_dict = body['server'] - metadata = server_dict.get('metadata', {}) password = self.controller._get_server_admin_password(server_dict) - # NOTE(ja): extract key_name and security_group from metadata - # to use in os extensions for firewall & keypairs - key_name = metadata.get('key_name') - security_group = metadata.get('security_group') + if not 'name' in server_dict: + msg = _("Server name is not defined") + raise exc.HTTPBadRequest(explanation=msg) + + name = server_dict['name'] + self._validate_server_name(name) + name = name.strip() 
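        # NOTE: after this rework, key_name rides at the top level of the
        # server dict, mirroring user_data, e.g. (values hypothetical):
        #
        #     {'server': {'name': 'vm1', 'imageRef': '1', 'flavorRef': '1',
        #                 'key_name': 'mykey', 'user_data': '<base64 blob>'}}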
image_href = self.controller._image_ref_from_req_data(body) # If the image href was generated by nova api, strip image_href @@ -112,16 +114,13 @@ class CreateInstanceHelper(object): msg = _("Invalid flavorRef provided.") raise exc.HTTPBadRequest(explanation=msg) - if not 'name' in server_dict: - msg = _("Server name is not defined") - raise exc.HTTPBadRequest(explanation=msg) - zone_blob = server_dict.get('blob') - user_data = server_dict.get('user_data') availability_zone = server_dict.get('availability_zone') - name = server_dict['name'] - self._validate_server_name(name) - name = name.strip() + + # optional openstack extensions: + key_name = server_dict.get('key_name') + security_group = server_dict.get('security_group') + user_data = server_dict.get('user_data') reservation_id = server_dict.get('reservation_id') min_count = server_dict.get('min_count') @@ -152,7 +151,7 @@ class CreateInstanceHelper(object): display_description=name, key_name=key_name, security_group=security_group, - metadata=metadata, + metadata=server_dict.get('metadata', {}), injected_files=injected_files, admin_password=password, zone_blob=zone_blob, diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index edc328129..ac2de0c57 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -78,6 +78,9 @@ class ViewBuilder(object): metadata[item['key']] = str(item['value']) inst_dict['metadata'] = metadata + inst_dict['key_name'] = inst.get('key_name') + inst_dict['security_group'] = inst.get('security_group') + inst_dict['hostId'] = '' if inst.get('host'): inst_dict['hostId'] = hashlib.sha224(inst['host']).hexdigest() -- cgit From f4f0ce95946962be73cfc509b24fd000fc931198 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Thu, 18 Aug 2011 16:46:16 -0700 Subject: tests pass --- nova/api/openstack/views/servers.py | 4 ++-- nova/tests/api/openstack/test_servers.py | 14 ++++++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index ac2de0c57..285228b30 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -78,8 +78,8 @@ class ViewBuilder(object): metadata[item['key']] = str(item['value']) inst_dict['metadata'] = metadata - inst_dict['key_name'] = inst.get('key_name') - inst_dict['security_group'] = inst.get('security_group') + inst_dict['key_name'] = inst.get('key_name', '') + inst_dict['security_group'] = inst.get('security_group', '') inst_dict['hostId'] = '' if inst.get('host'): diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index a510d7d97..e374abb6b 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -335,6 +335,8 @@ class ServersTest(test.TestCase): "name": "server1", "status": "BUILD", "hostId": '', + "key_name": '', + "security_group": '', "image": { "id": "10", "links": [ @@ -497,6 +499,8 @@ class ServersTest(test.TestCase): "name": "server1", "status": "ACTIVE", "hostId": '', + "key_name": '', + "security_group": '', "image": { "id": "10", "links": [ @@ -588,6 +592,8 @@ class ServersTest(test.TestCase): "name": "server1", "status": "ACTIVE", "hostId": '', + "key_name": '', + "security_group": '', "image": { "id": "10", "links": [ @@ -3094,6 +3100,8 @@ class ServersViewBuilderV11Test(test.TestCase): "name": "test_server", "status": "BUILD", "hostId": '', + "key_name": '', + "security_group": '', "image": { "id": 
"5", "links": [ @@ -3145,6 +3153,8 @@ class ServersViewBuilderV11Test(test.TestCase): "name": "test_server", "status": "ACTIVE", "hostId": '', + "key_name": '', + "security_group": '', "image": { "id": "5", "links": [ @@ -3200,6 +3210,8 @@ class ServersViewBuilderV11Test(test.TestCase): "name": "test_server", "status": "BUILD", "hostId": '', + "key_name": '', + "security_group": '', "image": { "id": "5", "links": [ @@ -3265,6 +3277,8 @@ class ServerXMLSerializationTest(test.TestCase): "name": "test_server", "status": "BUILD", "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0', + "key_name": '', + "security_group": '', "image": { "id": "5", "links": [ -- cgit From 203309693fc2dd648b9d4b211686228557728c89 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 18 Aug 2011 19:05:40 -0700 Subject: remove security groups, improve exception handling, add tests --- nova/api/openstack/create_instance_helper.py | 5 ++-- nova/api/openstack/views/servers.py | 1 - nova/tests/api/openstack/test_servers.py | 42 +++++++++++++++++++++++----- 3 files changed, 38 insertions(+), 10 deletions(-) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index c2e9e76ac..84d8c0c39 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -119,7 +119,6 @@ class CreateInstanceHelper(object): # optional openstack extensions: key_name = server_dict.get('key_name') - security_group = server_dict.get('security_group') user_data = server_dict.get('user_data') reservation_id = server_dict.get('reservation_id') @@ -150,7 +149,6 @@ class CreateInstanceHelper(object): display_name=name, display_description=name, key_name=key_name, - security_group=security_group, metadata=server_dict.get('metadata', {}), injected_files=injected_files, admin_password=password, @@ -168,6 +166,9 @@ class CreateInstanceHelper(object): except exception.FlavorNotFound as error: msg = _("Invalid flavorRef provided.") raise exc.HTTPBadRequest(explanation=msg) + except exception.KeypairNotFound as error: + msg = _("Invalid key_name provided.") + raise exc.HTTPBadRequest(explanation=msg) # Let the caller deal with unhandled exceptions. 
def _handle_quota_error(self, error): diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 285228b30..c0df5abe6 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -79,7 +79,6 @@ class ViewBuilder(object): inst_dict['metadata'] = metadata inst_dict['key_name'] = inst.get('key_name', '') - inst_dict['security_group'] = inst.get('security_group', '') inst_dict['hostId'] = '' if inst.get('host'): diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index e374abb6b..139820c09 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -336,7 +336,6 @@ class ServersTest(test.TestCase): "status": "BUILD", "hostId": '', "key_name": '', - "security_group": '', "image": { "id": "10", "links": [ @@ -500,7 +499,6 @@ class ServersTest(test.TestCase): "status": "ACTIVE", "hostId": '', "key_name": '', - "security_group": '', "image": { "id": "10", "links": [ @@ -593,7 +591,6 @@ class ServersTest(test.TestCase): "status": "ACTIVE", "hostId": '', "key_name": '', - "security_group": '', "image": { "id": "10", "links": [ @@ -1643,6 +1640,41 @@ class ServersTest(test.TestCase): self.assertEqual(expected_flavor, server['flavor']) self.assertEqual(expected_image, server['image']) + def test_create_instance_v1_1_invalid_key_name(self): + self._setup_for_create_instance() + + image_href = 'http://localhost/v1.1/images/2' + flavor_ref = 'http://localhost/flavors/3' + body = dict(server=dict( + name='server_test', imageRef=image_href, flavorRef=flavor_ref, + key_name='nonexistentkey')) + req = webob.Request.blank('/v1.1/servers') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_create_instance_v1_1_valid_key_name(self): + self._setup_for_create_instance() + + def key_pair_get(context, user_id, key_name): + return dict(name='mykey', public_key='public_key') + + self.stubs.Set(nova.db, 'key_pair_get', key_pair_get) + + image_href = 'http://localhost/v1.1/images/2' + flavor_ref = 'http://localhost/flavors/3' + body = dict(server=dict( + name='server_test', imageRef=image_href, flavorRef=flavor_ref, + key_name='mykey')) + req = webob.Request.blank('/v1.1/servers') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + def test_create_instance_v1_1_invalid_flavor_href(self): self._setup_for_create_instance() @@ -3101,7 +3133,6 @@ class ServersViewBuilderV11Test(test.TestCase): "status": "BUILD", "hostId": '', "key_name": '', - "security_group": '', "image": { "id": "5", "links": [ @@ -3154,7 +3185,6 @@ class ServersViewBuilderV11Test(test.TestCase): "status": "ACTIVE", "hostId": '', "key_name": '', - "security_group": '', "image": { "id": "5", "links": [ @@ -3211,7 +3241,6 @@ class ServersViewBuilderV11Test(test.TestCase): "status": "BUILD", "hostId": '', "key_name": '', - "security_group": '', "image": { "id": "5", "links": [ @@ -3278,7 +3307,6 @@ class ServerXMLSerializationTest(test.TestCase): "status": "BUILD", "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0', "key_name": '', - "security_group": '', "image": { "id": "5", "links": [ -- cgit From 1ab7b11a439cef8b48621355467d5fb460d9b2e2 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Thu, 18 Aug 2011 22:52:45 -0700 Subject: 
add key_name/data support to server stub --- nova/tests/api/openstack/test_servers.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 139820c09..605b3fee3 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -145,7 +145,7 @@ def instance_addresses(context, instance_id): def stub_instance(id, user_id='fake', project_id='fake', private_address=None, public_addresses=None, host=None, power_state=0, reservation_id="", uuid=FAKE_UUID, image_ref="10", - flavor_id="1", interfaces=None, name=None): + flavor_id="1", interfaces=None, name=None, key_name=''): metadata = [] metadata.append(InstanceMetadata(key='seq', value=id)) @@ -160,6 +160,11 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None, if host is not None: host = str(host) + if key_name: + key_data = 'FAKE' + else: + key_data = '' + # ReservationID isn't sent back, hack it in there. server_name = name or "server%s" % id if reservation_id != "": @@ -176,8 +181,8 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None, "kernel_id": "", "ramdisk_id": "", "launch_index": 0, - "key_name": "", - "key_data": "", + "key_name": key_name, + "key_data": key_data, "state": power_state, "state_description": "", "memory_mb": 0, -- cgit From 9b96b24ec93864731b6fc5031d2eceb22398be24 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 19 Aug 2011 09:30:52 -0400 Subject: Bump migration number. --- .../versions/037_update_instance_states.py | 57 ---------------------- .../versions/039_update_instance_states.py | 57 ++++++++++++++++++++++ 2 files changed, 57 insertions(+), 57 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/039_update_instance_states.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py deleted file mode 100644 index 07efbf90f..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py +++ /dev/null @@ -1,57 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData, Table, Column, String - -meta = MetaData() - -c_task_state = Column('task_state', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - - -def upgrade(migrate_engine): - # Upgrade operations go here. 
Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - c_state = instances.c.state - c_state.alter(name='power_state') - - c_vm_state = instances.c.state_description - c_vm_state.alter(name='vm_state') - - instances.create_column(c_task_state) - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - c_state = instances.c.power_state - c_state.alter(name='state') - - c_vm_state = instances.c.vm_state - c_vm_state.alter(name='state_description') - - instances.drop_column('task_state') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/039_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/039_update_instance_states.py new file mode 100644 index 000000000..07efbf90f --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/039_update_instance_states.py @@ -0,0 +1,57 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table, Column, String + +meta = MetaData() + +c_task_state = Column('task_state', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_state = instances.c.state + c_state.alter(name='power_state') + + c_vm_state = instances.c.state_description + c_vm_state.alter(name='vm_state') + + instances.create_column(c_task_state) + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_state = instances.c.power_state + c_state.alter(name='state') + + c_vm_state = instances.c.vm_state + c_vm_state.alter(name='state_description') + + instances.drop_column('task_state') -- cgit From 3bd386cdba53f6a54a29e510c0f9eecf9b9ea7d9 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 19 Aug 2011 15:13:40 -0400 Subject: vm_state --> vm_states --- nova/api/ec2/cloud.py | 24 ++++++++-------- nova/compute/api.py | 46 +++++++++++++++---------------- nova/compute/manager.py | 50 +++++++++++++++++----------------- nova/compute/vm_state.py | 31 --------------------- nova/compute/vm_states.py | 31 +++++++++++++++++++++ nova/scheduler/driver.py | 4 +-- nova/tests/scheduler/test_scheduler.py | 2 +- nova/tests/test_compute.py | 8 +++--- 8 files changed, 98 insertions(+), 98 deletions(-) delete mode 100644 nova/compute/vm_state.py create mode 100644 nova/compute/vm_states.py diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 9aebf92e3..4b69cc272 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -47,6 +47,7 @@ from nova import utils from nova import volume from nova.api.ec2 import ec2utils from nova.compute import instance_types +from nova.compute import vm_states from nova.image import s3 @@ -1039,11 +1040,10 @@ class CloudController(object): def _format_attr_instance_initiated_shutdown_behavior(instance, result): - state_description = instance['state_description'] - state_to_value = {'stopping': 'stop', - 'stopped': 'stop', - 'terminating': 'terminate'} - value = state_to_value.get(state_description) + vm_state = instance['vm_state'] + state_to_value = {vm_states.STOP: 'stop', + vm_states.DELETE: 'terminate'} + value = state_to_value.get(vm_state) if value: result['instanceInitiatedShutdownBehavior'] = value @@ -1198,8 +1198,8 @@ class CloudController(object): self._format_kernel_id(instance, i, 'kernelId') self._format_ramdisk_id(instance, i, 'ramdiskId') i['instanceState'] = { - 'code': instance['state'], - 'name': instance['state_description']} + 'code': instance['power_state'], + 'name': instance['vm_state']} #FIXME fixed_addr = None floating_addr = None if instance['fixed_ips']: @@ -1618,22 +1618,22 @@ class CloudController(object): # stop the instance if necessary restart_instance = False if not no_reboot: - state_description = instance['state_description'] + vm_state = instance['vm_state'] # if the instance is in subtle state, refuse to proceed. 
- if state_description not in ('running', 'stopping', 'stopped'): + if vm_state not in (vm_states.ACTIVE, vm_states.STOP): raise exception.InstanceNotRunning(instance_id=ec2_instance_id) - if state_description == 'running': + if vm_state == vm_states.ACTIVE: restart_instance = True self.compute_api.stop(context, instance_id=instance_id) # wait instance for really stopped start_time = time.time() - while state_description != 'stopped': + while vm_state != vm_states.STOP: time.sleep(1) instance = self.compute_api.get(context, instance_id) - state_description = instance['state_description'] + vm_state = instance['vm_state'] # NOTE(yamahata): timeout and error. 1 hour for now for safety. # Is it too short/long? # Or is there any better way? diff --git a/nova/compute/api.py b/nova/compute/api.py index 77397f90e..8634ad27a 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -36,7 +36,7 @@ from nova import utils from nova import volume from nova.compute import instance_types from nova.compute import power_state -from nova.compute import vm_state +from nova.compute import vm_states from nova.compute.utils import terminate_volumes from nova.scheduler import api as scheduler_api from nova.db import base @@ -76,10 +76,10 @@ def generate_default_hostname(instance): def _is_able_to_shutdown(instance, instance_id): states = { - vm_state.DELETE: "Instance %s is already being terminated", - vm_state.MIGRATE: "Instance %s is being migrated", - vm_state.RESIZE: "Instance %s is being resized", - vm_state.STOP: "Instance %s is being stopped", + vm_states.DELETE: "Instance %s is already being terminated", + vm_states.MIGRATE: "Instance %s is being migrated", + vm_states.RESIZE: "Instance %s is being resized", + vm_states.STOP: "Instance %s is being stopped", } msg = states.get(instance['vm_state']) if msg: @@ -236,7 +236,7 @@ class API(base.Base): 'kernel_id': kernel_id or '', 'ramdisk_id': ramdisk_id or '', 'power_state': power_state.NOSTATE, - 'vm_state': vm_state.BUILD, + 'vm_state': vm_states.BUILD, 'user_id': context.user_id, 'project_id': context.project_id, 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), @@ -653,7 +653,7 @@ class API(base.Base): self.update(context, instance_id, - vm_state=vm_state.DELETE, + vm_state=vm_states.DELETE, terminated_at=utils.utcnow()) host = instance['host'] @@ -675,7 +675,7 @@ class API(base.Base): self.update(context, instance_id, - vm_state=vm_state.STOP, + vm_state=vm_states.STOP, terminated_at=utils.utcnow()) host = instance['host'] @@ -689,12 +689,12 @@ class API(base.Base): instance = self._get_instance(context, instance_id, 'starting') vm_state = instance["vm_state"] - if vm_state != vm_state.STOP: + if vm_state != vm_states.STOP: LOG.warning(_("Instance %(instance_id)s is not " "stopped. (%(vm_state)s)") % locals()) return - self.update(context, instance_id, vm_state=vm_state.ACTIVE) + self.update(context, instance_id, vm_state=vm_states.ACTIVE) # TODO(yamahata): injected_files isn't supported right now. # It is used only for osapi. not for ec2 api. 
@@ -923,7 +923,7 @@ class API(base.Base): @scheduler_api.reroute_compute("reboot") def reboot(self, context, instance_id): """Reboot the given instance.""" - self.update(context, instance_id, vm_state=vm_state.REBOOT) + self.update(context, instance_id, vm_state=vm_states.REBOOT) self._cast_compute_message('reboot_instance', context, instance_id) @scheduler_api.reroute_compute("rebuild") @@ -932,8 +932,8 @@ class API(base.Base): """Rebuild the given instance with the provided metadata.""" instance = db.api.instance_get(context, instance_id) invalid_rebuild_states = [ - vm_state.BUILD, - vm_state.REBUILD, + vm_states.BUILD, + vm_states.REBUILD, ] if instance["vm_state"] in invalid_rebuild_states: @@ -956,7 +956,7 @@ class API(base.Base): "injected_files": files_to_inject, } - self.update(context, instance_id, vm_state=vm_state.REBUILD) + self.update(context, instance_id, vm_state=vm_states.REBUILD) self._cast_compute_message('rebuild_instance', context, @@ -975,7 +975,7 @@ class API(base.Base): raise exception.MigrationNotFoundByStatus(instance_id=instance_id, status='finished') - self.update(context, instance_id, vm_state=vm_state.ACTIVE) + self.update(context, instance_id, vm_state=vm_states.ACTIVE) params = {'migration_id': migration_ref['id']} self._cast_compute_message('revert_resize', context, @@ -998,7 +998,7 @@ class API(base.Base): raise exception.MigrationNotFoundByStatus(instance_id=instance_id, status='finished') - self.update(context, instance_id, vm_state=vm_state.ACTIVE) + self.update(context, instance_id, vm_state=vm_states.ACTIVE) params = {'migration_id': migration_ref['id']} self._cast_compute_message('confirm_resize', context, @@ -1045,7 +1045,7 @@ class API(base.Base): if (current_memory_mb == new_memory_mb) and flavor_id: raise exception.CannotResizeToSameSize() - self.update(context, instance_id, vm_state=vm_state.RESIZE) + self.update(context, instance_id, vm_state=vm_states.RESIZE) instance_ref = self._get_instance(context, instance_id, 'resize') self._cast_scheduler_message(context, @@ -1080,13 +1080,13 @@ class API(base.Base): @scheduler_api.reroute_compute("pause") def pause(self, context, instance_id): """Pause the given instance.""" - self.update(context, instance_id, vm_state=vm_state.PAUSE) + self.update(context, instance_id, vm_state=vm_states.PAUSE) self._cast_compute_message('pause_instance', context, instance_id) @scheduler_api.reroute_compute("unpause") def unpause(self, context, instance_id): """Unpause the given instance.""" - self.update(context, instance_id, vm_state=vm_state.ACTIVE) + self.update(context, instance_id, vm_state=vm_states.ACTIVE) self._cast_compute_message('unpause_instance', context, instance_id) def _call_compute_message_for_host(self, action, context, host, params): @@ -1119,25 +1119,25 @@ class API(base.Base): @scheduler_api.reroute_compute("suspend") def suspend(self, context, instance_id): """Suspend the given instance.""" - self.update(context, instance_id, vm_state=vm_state.SUSPEND) + self.update(context, instance_id, vm_state=vm_states.SUSPEND) self._cast_compute_message('suspend_instance', context, instance_id) @scheduler_api.reroute_compute("resume") def resume(self, context, instance_id): """Resume the given instance.""" - self.update(context, instance_id, vm_state=vm_state.ACTIVE) + self.update(context, instance_id, vm_state=vm_states.ACTIVE) self._cast_compute_message('resume_instance', context, instance_id) @scheduler_api.reroute_compute("rescue") def rescue(self, context, instance_id): """Rescue the given instance.""" 
- self.update(context, instance_id, vm_state=vm_state.RESCUE) + self.update(context, instance_id, vm_state=vm_states.RESCUE) self._cast_compute_message('rescue_instance', context, instance_id) @scheduler_api.reroute_compute("unrescue") def unrescue(self, context, instance_id): """Unrescue the given instance.""" - self.update(context, instance_id, vm_state=vm_state.ACTIVE) + self.update(context, instance_id, vm_state=vm_states.ACTIVE) self._cast_compute_message('unrescue_instance', context, instance_id) @scheduler_api.reroute_compute("set_admin_password") diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 0d7f3ad71..4be5bdd69 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -57,7 +57,7 @@ from nova import utils from nova import volume from nova.compute import power_state from nova.compute import task_state -from nova.compute import vm_state +from nova.compute import vm_states from nova.notifier import api as notifier from nova.compute.utils import terminate_volumes from nova.virt import driver @@ -372,7 +372,7 @@ class ComputeManager(manager.SchedulerDependentManager): updates = {} updates['host'] = self.host updates['launched_on'] = self.host - updates['vm_state'] = vm_state.BUILD + updates['vm_state'] = vm_states.BUILD updates['task_state'] = task_state.NETWORKING instance = self.db.instance_update(context, instance_id, updates) instance['injected_files'] = kwargs.get('injected_files', []) @@ -397,7 +397,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - vm_state=vm_state.BUILD, + vm_state=vm_states.BUILD, task_state=task_state.BLOCK_DEVICE_MAPPING) (swap, ephemerals, @@ -428,7 +428,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=None, launched_at=utils.utcnow()) @@ -523,7 +523,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_state.REBUILD, + vm_state=vm_states.REBUILD, task_state=task_state.REBUILDING) network_info = self._get_instance_nw_info(context, instance_ref) @@ -531,7 +531,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - vm_state=vm_state.REBUILD, + vm_state=vm_states.REBUILD, task_state=task_state.BLOCK_DEVICE_MAPPING) bd_mapping = self._setup_block_device_mapping(context, instance_id) @@ -544,7 +544,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - vm_state=vm_state.REBUILD, + vm_state=vm_states.REBUILD, task_state=task_state.SPAWN) self.driver.spawn(context, instance_ref, network_info, bd_mapping) @@ -553,7 +553,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=None, image_ref=image_ref, launched_at=utils.utcnow()) @@ -577,7 +577,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_state.REBOOT, + vm_state=vm_states.REBOOT, task_state=task_state.REBOOTING) if instance_ref['power_state'] != power_state.RUNNING: @@ -595,7 +595,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, 
power_state=current_power_state, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @@ -622,7 +622,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=image_type) LOG.audit(_('instance %s: snapshotting'), instance_id, @@ -787,7 +787,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - vm_state=vm_state.RESCUE, + vm_state=vm_states.RESCUE, task_state=task_state.RESCUING) instance_ref = self.db.instance_get(context, instance_id) @@ -799,7 +799,7 @@ class ComputeManager(manager.SchedulerDependentManager): current_power_state = self._get_power_state(context, instance_ref) self._instance_update(context, instance_id, - vm_state=vm_state.RESCUE, + vm_state=vm_states.RESCUE, task_state=task_state.RESCUED, power_state=current_power_state) @@ -812,7 +812,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=task_state.UNRESCUING) instance_ref = self.db.instance_get(context, instance_id) @@ -824,7 +824,7 @@ class ComputeManager(manager.SchedulerDependentManager): current_power_state = self._get_power_state(context, instance_ref) self._instance_update(context, instance_id, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=None, power_state=current_power_state) @@ -1048,7 +1048,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - vm_state=vm_state.PAUSE, + vm_state=vm_states.PAUSE, task_state=task_state.PAUSING) instance_ref = self.db.instance_get(context, instance_id) @@ -1058,7 +1058,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_state.PAUSE, + vm_state=vm_states.PAUSE, task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @@ -1070,7 +1070,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=task_state.UNPAUSING) instance_ref = self.db.instance_get(context, instance_id) @@ -1080,7 +1080,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @@ -1111,7 +1111,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - vm_state=vm_state.SUSPEND, + vm_state=vm_states.SUSPEND, task_state=task_state.SUSPENDING) instance_ref = self.db.instance_get(context, instance_id) @@ -1121,7 +1121,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_state.SUSPEND, + vm_state=vm_states.SUSPEND, task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @@ -1133,7 +1133,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=task_state.RESUMING) 
instance_ref = self.db.instance_get(context, instance_id) @@ -1143,7 +1143,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @@ -1560,7 +1560,7 @@ class ComputeManager(manager.SchedulerDependentManager): instance_ref["id"], host=dest, power_state=current_power_state, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=None) # Restore volume state @@ -1611,7 +1611,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_ref['id'], host=host, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=None) for volume_ref in instance_ref['volumes']: diff --git a/nova/compute/vm_state.py b/nova/compute/vm_state.py deleted file mode 100644 index a1bca6ef4..000000000 --- a/nova/compute/vm_state.py +++ /dev/null @@ -1,31 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Possible vm states for instances""" - -ACTIVE='active' -BUILD='build' -REBUILD='rebuild' -REBOOT='reboot' -DELETE='delete' -STOP='stop' -MIGRATE='migrate' -RESIZE='resize' -VERIFY_RESIZE='verify_resize' -PAUSE='pause' -SUSPEND='suspend' -RESCUE='rescue' diff --git a/nova/compute/vm_states.py b/nova/compute/vm_states.py new file mode 100644 index 000000000..a1bca6ef4 --- /dev/null +++ b/nova/compute/vm_states.py @@ -0,0 +1,31 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Possible vm states for instances""" + +ACTIVE='active' +BUILD='build' +REBUILD='rebuild' +REBOOT='reboot' +DELETE='delete' +STOP='stop' +MIGRATE='migrate' +RESIZE='resize' +VERIFY_RESIZE='verify_resize' +PAUSE='pause' +SUSPEND='suspend' +RESCUE='rescue' diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index b788b996f..8f9be879b 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -31,7 +31,7 @@ from nova import rpc from nova import utils from nova.compute import power_state from nova.compute import task_state -from nova.compute import vm_state +from nova.compute import vm_states from nova.api.ec2 import ec2utils @@ -106,7 +106,7 @@ class Scheduler(object): dest, block_migration) # Changing instance_state. 
- values = {"vm_state": vm_state.MIGRATE} + values = {"vm_state": vm_states.MIGRATE} db.instance_update(context, instance_id, values) # Changing volume state diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 1b5e131c9..629019eaf 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -40,7 +40,7 @@ from nova.scheduler import driver from nova.scheduler import manager from nova.scheduler import multi from nova.compute import power_state -from nova.compute import vm_state +from nova.compute import vm_states FLAGS = flags.FLAGS diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 188398924..ca1bbc69f 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -23,7 +23,7 @@ from nova import compute from nova.compute import instance_types from nova.compute import manager as compute_manager from nova.compute import power_state -from nova.compute import vm_state +from nova.compute import vm_states from nova import context from nova import db from nova.db.sqlalchemy import models @@ -748,7 +748,7 @@ class ComputeTestCase(test.TestCase): 'block_migration': False, 'disk': None}}).\ AndRaise(rpc.RemoteError('', '', '')) - dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_state.ACTIVE, + dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_states.ACTIVE, 'task_state': None, 'host': i_ref['host']}) for v in i_ref['volumes']: @@ -780,7 +780,7 @@ class ComputeTestCase(test.TestCase): 'block_migration': False, 'disk': None}}).\ AndRaise(rpc.RemoteError('', '', '')) - dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_state.ACTIVE, + dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_states.ACTIVE, 'task_state': None, 'host': i_ref['host']}) @@ -826,7 +826,7 @@ class ComputeTestCase(test.TestCase): c = context.get_admin_context() instance_id = self._create_instance() i_ref = db.instance_get(c, instance_id) - db.instance_update(c, i_ref['id'], {'vm_state': vm_state.MIGRATE, + db.instance_update(c, i_ref['id'], {'vm_state': vm_states.MIGRATE, 'power_state': power_state.PAUSED}) v_ref = db.volume_create(c, {'size': 1, 'instance_id': instance_id}) fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1', -- cgit From c4e77b67a74cb0828bb9a7ccbedcaa1baeb6188d Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Fri, 19 Aug 2011 18:34:34 -0400 Subject: Lots of modifications surrounding the OSAPI to remove any mention of power states and to use the vm_states and task_state modules exclusively. Currently there are still a number of tests failing, but this is a stopping place for today.
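
A usage sketch of the new helpers in nova/api/openstack/common.py, derived from the mapping in the diff that follows (illustrative only; note the task module is still named task_state at this point in the series):

    from nova.api.openstack import common
    from nova.compute import task_state
    from nova.compute import vm_states

    common.status_from_state(vm_states.BUILD)                        # 'BUILD'
    common.status_from_state(vm_states.STOP)                         # 'STOPPED'
    common.status_from_state(vm_states.ACTIVE, task_state.PASSWORD)  # 'PASSWORD'
    common.vm_state_from_status('suspended')                         # vm_states.SUSPEND
    common.vm_state_from_status('bogus')                             # None (no match)
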
--- nova/api/openstack/common.py | 52 ++++++------ nova/api/openstack/servers.py | 12 ++- nova/api/openstack/views/servers.py | 5 +- nova/compute/vm_states.py | 2 + nova/tests/api/openstack/test_server_actions.py | 28 ++++--- nova/tests/api/openstack/test_servers.py | 102 +++++++++++++++--------- nova/tests/integrated/test_servers.py | 27 ++++--- nova/tests/vmwareapi/db_fakes.py | 5 +- 8 files changed, 138 insertions(+), 95 deletions(-) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index d9eb832f2..eae0fd916 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -27,7 +27,8 @@ from nova import flags from nova import log as logging from nova import quota from nova.api.openstack import wsgi -from nova.compute import power_state as compute_power_state +from nova.compute import vm_states +from nova.compute import task_state LOG = logging.getLogger('nova.api.openstack.common') @@ -38,36 +39,35 @@ XML_NS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1' -_STATUS_MAP = { - None: 'BUILD', - compute_power_state.NOSTATE: 'BUILD', - compute_power_state.RUNNING: 'ACTIVE', - compute_power_state.BLOCKED: 'ACTIVE', - compute_power_state.SUSPENDED: 'SUSPENDED', - compute_power_state.PAUSED: 'PAUSED', - compute_power_state.SHUTDOWN: 'SHUTDOWN', - compute_power_state.SHUTOFF: 'SHUTOFF', - compute_power_state.CRASHED: 'ERROR', - compute_power_state.FAILED: 'ERROR', - compute_power_state.BUILDING: 'BUILD', +_STATE_MAP = { + vm_states.ACTIVE: 'ACTIVE', + vm_states.BUILD: 'BUILD', + vm_states.REBUILD: 'REBUILD', + vm_states.REBOOT: 'REBOOT', + vm_states.HARD_REBOOT: 'HARD_REBOOT', + vm_states.STOP: 'STOPPED', + vm_states.MIGRATE: 'MIGRATING', + vm_states.RESIZE: 'RESIZE', + vm_states.VERIFY_RESIZE: 'VERIFY_RESIZE', + vm_states.PAUSE: 'PAUSED', + vm_states.SUSPEND: 'SUSPENDED', + vm_states.RESCUE: 'RESCUE', + vm_states.ERROR: 'ERROR', } -def status_from_power_state(power_state): - """Map the power state to the server status string""" - return _STATUS_MAP[power_state] +def status_from_state(_vm_state, _task_state=None): + """Given vm_state and task_state, return a status string.""" + if _vm_state == vm_states.ACTIVE and _task_state == task_state.PASSWORD: + return "PASSWORD" + return _STATE_MAP.get(_vm_state, "UNKNOWN_STATE") -def power_states_from_status(status): - """Map the server status string to a list of power states""" - power_states = [] - for power_state, status_map in _STATUS_MAP.iteritems(): - # Skip the 'None' state - if power_state is None: - continue - if status.lower() == status_map.lower(): - power_states.append(power_state) - return power_states +def vm_state_from_status(status): + """Map the server status string to a vm state.""" + for state, status_string in _STATE_MAP.iteritems(): + if status.lower() == status_string.lower(): + return state def get_pagination_params(request): diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 41e63ec3c..0a451caee 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -95,17 +95,15 @@ class Controller(object): search_opts['recurse_zones'] = utils.bool_from_str( search_opts.get('recurse_zones', False)) - # If search by 'status', we need to convert it to 'state' - # If the status is unknown, bail. - # Leave 'state' in search_opts so compute can pass it on to - # child zones.. + # If search by 'status', we need to convert it to 'vm_state' + # to pass on to child zones. 
if 'status' in search_opts: status = search_opts['status'] - search_opts['state'] = common.power_states_from_status(status) - if len(search_opts['state']) == 0: + state = common.vm_state_from_status(status) + if state is None: reason = _('Invalid server status: %(status)s') % locals() - LOG.error(reason) raise exception.InvalidInput(reason=reason) + search_opts['vm_state'] = state # By default, compute's get_all() will return deleted instances. # If an admin hasn't specified a 'deleted' search option, we need diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index edc328129..e9a932b0e 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -27,6 +27,7 @@ from nova.api.openstack.views import addresses as addresses_view from nova.api.openstack.views import flavors as flavors_view from nova.api.openstack.views import images as images_view from nova import utils +from nova.compute import vm_states class ViewBuilder(object): @@ -60,11 +61,13 @@ class ViewBuilder(object): def _build_detail(self, inst): """Returns a detailed model of a server.""" + vm_state = inst.get('vm_state', vm_states.BUILD) + task_state = inst.get('task_state') inst_dict = { 'id': inst['id'], 'name': inst['display_name'], - 'status': common.status_from_power_state(inst.get('state'))} + 'status': common.status_from_state(vm_state, task_state)} ctxt = nova.context.get_admin_context() compute_api = nova.compute.API() diff --git a/nova/compute/vm_states.py b/nova/compute/vm_states.py index a1bca6ef4..d3d168001 100644 --- a/nova/compute/vm_states.py +++ b/nova/compute/vm_states.py @@ -21,6 +21,7 @@ ACTIVE='active' BUILD='build' REBUILD='rebuild' REBOOT='reboot' +HARD_REBOOT='hard_reboot' DELETE='delete' STOP='stop' MIGRATE='migrate' @@ -29,3 +30,4 @@ VERIFY_RESIZE='verify_resize' PAUSE='pause' SUSPEND='suspend' RESCUE='rescue' +ERROR='error' diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 80a27e30f..6f8be0f47 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -12,7 +12,8 @@ from nova import utils from nova import flags from nova.api.openstack import create_instance_helper from nova.compute import instance_types -from nova.compute import power_state +from nova.compute import vm_states +from nova.compute import task_state import nova.db.api from nova import test from nova.tests.api.openstack import common @@ -30,17 +31,18 @@ def instance_update(context, instance_id, kwargs): return _get_instance() -def return_server_with_power_state(power_state): +def return_server_with_state(vm_state, task_state=None): def _return_server(context, id): instance = _get_instance() - instance['state'] = power_state + instance['vm_state'] = vm_state + instance['task_state'] = task_state return instance return _return_server -def return_server_with_uuid_and_power_state(power_state): +def return_server_with_uuid_and_state(vm_state, task_state=None): def _return_server(context, id): - return return_server_with_power_state(power_state) + return return_server_with_state(vm_state, task_state) return _return_server @@ -68,8 +70,8 @@ def _get_instance(): "launch_index": 0, "key_name": "", "key_data": "", - "state": 0, - "state_description": "", + "vm_state": vm_states.ACTIVE, + "task_state": None, "memory_mb": 0, "vcpus": 0, "local_gb": 0, @@ -164,11 +166,11 @@ class ServerActionsTest(test.TestCase): }, } - state = power_state.BUILDING - 
new_return_server = return_server_with_power_state(state) + state = vm_states.BUILD + new_return_server = return_server_with_state(state) self.stubs.Set(nova.db.api, 'instance_get', new_return_server) self.stubs.Set(nova.db, 'instance_get_by_uuid', - return_server_with_uuid_and_power_state(state)) + return_server_with_uuid_and_state(state)) req = webob.Request.blank('/v1.0/servers/1/action') req.method = 'POST' @@ -627,11 +629,11 @@ class ServerActionsTestV11(test.TestCase): }, } - state = power_state.BUILDING - new_return_server = return_server_with_power_state(state) + state = vm_states.BUILD + new_return_server = return_server_with_state(state) self.stubs.Set(nova.db.api, 'instance_get', new_return_server) self.stubs.Set(nova.db, 'instance_get_by_uuid', - return_server_with_uuid_and_power_state(state)) + return_server_with_uuid_and_state(state)) req = webob.Request.blank('/v1.1/servers/1/action') req.method = 'POST' diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 437620854..b500c514e 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -34,7 +34,8 @@ from nova.api.openstack import servers from nova.api.openstack import wsgi import nova.compute.api from nova.compute import instance_types -from nova.compute import power_state +from nova.compute import task_state +from nova.compute import vm_states import nova.db.api import nova.scheduler.api from nova.db.sqlalchemy.models import Instance @@ -86,15 +87,18 @@ def return_server_with_addresses(private, public): return _return_server -def return_server_with_power_state(power_state): +def return_server_with_state(vm_state, task_state=None): def _return_server(context, id): - return stub_instance(id, power_state=power_state) + return stub_instance(id, vm_state=vm_state, task_state=task_state) return _return_server -def return_server_with_uuid_and_power_state(power_state): +def return_server_with_uuid_and_state(vm_state, task_state): def _return_server(context, id): - return stub_instance(id, uuid=FAKE_UUID, power_state=power_state) + return stub_instance(id, + uuid=FAKE_UUID, + vm_state=vm_state, + task_state=task_state) return _return_server @@ -143,7 +147,8 @@ def instance_addresses(context, instance_id): def stub_instance(id, user_id='fake', project_id='fake', private_address=None, - public_addresses=None, host=None, power_state=0, + public_addresses=None, host=None, + vm_state=None, task_state=None, reservation_id="", uuid=FAKE_UUID, image_ref="10", flavor_id="1", interfaces=None, name=None): metadata = [] @@ -178,8 +183,8 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None, "launch_index": 0, "key_name": "", "key_data": "", - "state": power_state, - "state_description": "", + "vm_state": vm_state or vm_states.BUILD, + "task_state": task_state, "memory_mb": 0, "vcpus": 0, "local_gb": 0, @@ -481,7 +486,7 @@ class ServersTest(test.TestCase): }, ] new_return_server = return_server_with_attributes( - interfaces=interfaces, power_state=1) + interfaces=interfaces, vm_state=vm_states.ACTIVE) self.stubs.Set(nova.db.api, 'instance_get', new_return_server) req = webob.Request.blank('/v1.1/servers/1') @@ -571,8 +576,8 @@ class ServersTest(test.TestCase): }, ] new_return_server = return_server_with_attributes( - interfaces=interfaces, power_state=1, image_ref=image_ref, - flavor_id=flavor_id) + interfaces=interfaces, vm_state=vm_states.ACTIVE, + image_ref=image_ref, flavor_id=flavor_id) self.stubs.Set(nova.db.api, 
'instance_get', new_return_server) req = webob.Request.blank('/v1.1/servers/1') @@ -1169,9 +1174,8 @@ class ServersTest(test.TestCase): def test_get_servers_allows_status_v1_1(self): def fake_get_all(compute_self, context, search_opts=None): self.assertNotEqual(search_opts, None) - self.assertTrue('state' in search_opts) - self.assertEqual(set(search_opts['state']), - set([power_state.RUNNING, power_state.BLOCKED])) + self.assertTrue('vm_state' in search_opts) + self.assertEqual(search_opts['vm_state'], vm_states.ACTIVE) return [stub_instance(100)] self.stubs.Set(nova.compute.API, 'get_all', fake_get_all) @@ -1188,13 +1192,9 @@ class ServersTest(test.TestCase): def test_get_servers_invalid_status_v1_1(self): """Test getting servers by invalid status""" - self.flags(allow_admin_api=False) - req = webob.Request.blank('/v1.1/servers?status=running') res = req.get_response(fakes.wsgi_app()) - # The following assert will fail if either of the asserts in - # fake_get_all() fail self.assertEqual(res.status_int, 400) self.assertTrue(res.body.find('Invalid server status') > -1) @@ -1632,6 +1632,7 @@ class ServersTest(test.TestCase): server = json.loads(res.body)['server'] self.assertEqual(16, len(server['adminPass'])) self.assertEqual(1, server['id']) + self.assertEqual("BUILD", server["status"]) self.assertEqual(0, server['progress']) self.assertEqual('server_test', server['name']) self.assertEqual(expected_flavor, server['flavor']) @@ -2165,23 +2166,52 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 204) self.assertEqual(self.server_delete_called, True) - def test_shutdown_status(self): - new_server = return_server_with_power_state(power_state.SHUTDOWN) - self.stubs.Set(nova.db.api, 'instance_get', new_server) - req = webob.Request.blank('/v1.0/servers/1') - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - res_dict = json.loads(res.body) - self.assertEqual(res_dict['server']['status'], 'SHUTDOWN') - def test_shutoff_status(self): - new_server = return_server_with_power_state(power_state.SHUTOFF) +class TestServerStatus(test.TestCase): + + def _get_with_state(self, vm_state, task_state=None): + new_server = return_server_with_state(vm_state, task_state) self.stubs.Set(nova.db.api, 'instance_get', new_server) - req = webob.Request.blank('/v1.0/servers/1') - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - res_dict = json.loads(res.body) - self.assertEqual(res_dict['server']['status'], 'SHUTOFF') + request = webob.Request.blank('/v1.0/servers/1') + response = request.get_response(fakes.wsgi_app()) + self.assertEqual(response.status_int, 200) + return json.loads(response.body) + + def test_active(self): + response = self._get_with_state(vm_states.ACTIVE) + self.assertEqual(response['server']['status'], 'ACTIVE') + + def test_reboot(self): + response = self._get_with_state(vm_states.REBOOT) + self.assertEqual(response['server']['status'], 'REBOOT') + + def test_hard_reboot(self): + response = self._get_with_state(vm_states.HARD_REBOOT) + self.assertEqual(response['server']['status'], 'HARD_REBOOT') + + def test_rebuild(self): + response = self._get_with_state(vm_states.REBUILD) + self.assertEqual(response['server']['status'], 'REBUILD') + + def test_rebuild_error(self): + response = self._get_with_state(vm_states.ERROR) + self.assertEqual(response['server']['status'], 'ERROR') + + def test_resize(self): + response = self._get_with_state(vm_states.RESIZE) + self.assertEqual(response['server']['status'], 'RESIZE') 
+ + def test_verify_resize(self): + response = self._get_with_state(vm_states.VERIFY_RESIZE) + self.assertEqual(response['server']['status'], 'VERIFY_RESIZE') + + def test_password_update(self): + response = self._get_with_state(vm_states.ACTIVE, task_state.PASSWORD) + self.assertEqual(response['server']['status'], 'PASSWORD') + + def test_stopped(self): + response = self._get_with_state(vm_states.STOP) + self.assertEqual(response['server']['status'], 'STOPPED') class TestServerCreateRequestXMLDeserializerV10(unittest.TestCase): @@ -3018,8 +3048,8 @@ class ServersViewBuilderV11Test(test.TestCase): "launch_index": 0, "key_name": "", "key_data": "", - "state": 0, - "state_description": "", + "vm_state": vm_states.BUILD, + "task_state": None, "memory_mb": 0, "vcpus": 0, "local_gb": 0, @@ -3132,7 +3162,7 @@ class ServersViewBuilderV11Test(test.TestCase): def test_build_server_detail_active_status(self): #set the power state of the instance to running - self.instance['state'] = 1 + self.instance['vm_state'] = vm_states.ACTIVE image_bookmark = "http://localhost/images/5" flavor_bookmark = "http://localhost/flavors/1" expected_server = { diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index 725f6d529..deeb3d008 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -28,6 +28,17 @@ LOG = logging.getLogger('nova.tests.integrated') class ServersTest(integrated_helpers._IntegratedTestBase): + def _wait_for_creation(self, server): + retries = 0 + while server['status'] == 'BUILD': + time.sleep(1) + server = self.api.get_server(server['id']) + print server + retries = retries + 1 + if retries > 8: + break + return server + def test_get_servers(self): """Simple check that listing servers works.""" servers = self.api.get_servers() @@ -36,7 +47,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): def test_create_and_delete_server(self): """Creates and deletes a server.""" - + self.flags(stub_network=True) # Create server # Build the server data gradually, checking errors along the way @@ -91,19 +102,11 @@ class ServersTest(integrated_helpers._IntegratedTestBase): server_ids = [server['id'] for server in servers] self.assertTrue(created_server_id in server_ids) - # Wait (briefly) for creation - retries = 0 - while found_server['status'] == 'build': - LOG.debug("found server: %s" % found_server) - time.sleep(1) - found_server = self.api.get_server(created_server_id) - retries = retries + 1 - if retries > 5: - break + found_server = self._wait_for_creation(found_server) # It should be available... # TODO(justinsb): Mock doesn't yet do this... 
- #self.assertEqual('available', found_server['status']) + self.assertEqual('ACTIVE', found_server['status']) servers = self.api.get_servers(detail=True) for server in servers: self.assertTrue("image" in server) @@ -190,6 +193,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): self.assertTrue(created_server['id']) created_server_id = created_server['id'] + created_server = self._wait_for_creation(created_server) + # rebuild the server with metadata post = {} post['rebuild'] = { diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py index afd672c7a..dd38420ce 100644 --- a/nova/tests/vmwareapi/db_fakes.py +++ b/nova/tests/vmwareapi/db_fakes.py @@ -23,6 +23,8 @@ import time from nova import db from nova import utils +from nova.compute import task_state +from nova.compute import vm_states def stub_out_db_instance_api(stubs): @@ -64,7 +66,8 @@ def stub_out_db_instance_api(stubs): 'image_ref': values['image_ref'], 'kernel_id': values['kernel_id'], 'ramdisk_id': values['ramdisk_id'], - 'state_description': 'scheduling', + 'vm_state': vm_states.BUILD, + 'task_state': task_state.SCHEDULE, 'user_id': values['user_id'], 'project_id': values['project_id'], 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), -- cgit From f82d2d309a0f826522854fe331d1c53b8c6d6879 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 22 Aug 2011 09:54:33 -0400 Subject: Ec2 API updates. --- nova/api/ec2/cloud.py | 25 ++++++++++++++++++++++++- nova/tests/test_cloud.py | 8 ++++---- nova/virt/fake.py | 12 ++++++------ 3 files changed, 34 insertions(+), 11 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 4b69cc272..b7c7d2e12 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -79,6 +79,29 @@ def _gen_key(context, user_id, key_name): return {'private_key': private_key, 'fingerprint': fingerprint} +# EC2 API: Valid Values: +# pending | running | shutting-down | terminated | stopping | stopped +_STATE_DESCRIPTION_MAP = { + vm_states.ACTIVE: 'running', + vm_states.BUILD: 'pending', + vm_states.REBUILD: 'pending', + vm_states.REBOOT: 'reboot', + vm_states.DELETE: 'terminated', + vm_states.STOP: 'stopped', + vm_states.MIGRATE: 'migrate', + vm_states.RESIZE: 'resize', + vm_states.VERIFY_RESIZE: 'verify_resize', + vm_states.PAUSE: 'pause', + vm_states.SUSPEND: 'suspend', + vm_states.RESCUE: 'rescue' +} + + +def state_description_from_vm_state(vm_state): + """Map the vm state to the server status string""" + return _STATE_DESCRIPTION_MAP[vm_state] + + # TODO(yamahata): hypervisor dependent default device name _DEFAULT_ROOT_DEVICE_NAME = '/dev/sda1' _DEFAULT_MAPPINGS = {'ami': 'sda1', @@ -1199,7 +1222,7 @@ class CloudController(object): self._format_ramdisk_id(instance, i, 'ramdiskId') i['instanceState'] = { 'code': instance['power_state'], - 'name': instance['vm_state']} #FIXME + 'name': state_description_from_vm_state(instance['vm_state'])} fixed_addr = None floating_addr = None if instance['fixed_ips']: diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 0793784f8..cce9514ec 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -1163,7 +1163,7 @@ class CloudTestCase(test.TestCase): self.compute = self.start_service('compute') def _wait_for_state(self, ctxt, instance_id, predicate): - """Wait for an stopping instance to be a given state""" + """Wait for a stopped instance to be a given state""" id = ec2utils.ec2_id_to_id(instance_id) while True: info = self.cloud.compute_api.get(context=ctxt, 
instance_id=id) @@ -1174,12 +1174,12 @@ class CloudTestCase(test.TestCase): def _wait_for_running(self, instance_id): def is_running(info): - return info['state_description'] == 'running' + return info['vm_state'] == 'running' self._wait_for_state(self.context, instance_id, is_running) def _wait_for_stopped(self, instance_id): def is_stopped(info): - return info['state_description'] == 'stopped' + return info['vm_state'] == 'stopped' self._wait_for_state(self.context, instance_id, is_stopped) def _wait_for_terminate(self, instance_id): @@ -1562,7 +1562,7 @@ class CloudTestCase(test.TestCase): 'id': 0, 'root_device_name': '/dev/sdh', 'security_groups': [{'name': 'fake0'}, {'name': 'fake1'}], - 'state_description': 'stopping', + 'vm_state': 'stopped', 'instance_type': {'name': 'fake_type'}, 'kernel_id': 1, 'ramdisk_id': 2, diff --git a/nova/virt/fake.py b/nova/virt/fake.py index dc0628772..b42f4ca2f 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -42,9 +42,9 @@ def get_connection(_): class FakeInstance(object): - def __init__(self, name, state): + def __init__(self, name, power_state): self.name = name - self.state = state + self.power_state = power_state class FakeConnection(driver.ComputeDriver): @@ -120,7 +120,7 @@ class FakeConnection(driver.ComputeDriver): def _map_to_instance_info(self, instance): instance = utils.check_isinstance(instance, FakeInstance) - info = driver.InstanceInfo(instance.name, instance.state) + info = driver.InstanceInfo(instance.name, instance.power_state) return info def list_instances_detail(self): @@ -150,8 +150,8 @@ class FakeConnection(driver.ComputeDriver): """ name = instance.name - state = power_state.RUNNING - fake_instance = FakeInstance(name, state) + pstate = power_state.RUNNING + fake_instance = FakeInstance(name, pstate) self.instances[name] = fake_instance def snapshot(self, context, instance, name): @@ -325,7 +325,7 @@ class FakeConnection(driver.ComputeDriver): if instance_name not in self.instances: raise exception.InstanceNotFound(instance_id=instance_name) i = self.instances[instance_name] - return {'state': i.state, + return {'state': i.power_state, 'max_mem': 0, 'mem': 0, 'num_cpu': 2, -- cgit From 44aea954e5efa7d94d8333ddbf54dab6464018a0 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 22 Aug 2011 10:01:13 -0400 Subject: Renamed task_state to task_states... 
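
With the constants now living in nova.compute.task_states, they read
consistently next to nova.compute.vm_states. For illustration only (this
snippet is not part of the patch, and it assumes the elided _STATE_MAP maps
vm_states.ACTIVE to 'ACTIVE'; only the PASSWORD special case is visible in
the diff below):

    from nova.api.openstack import common
    from nova.compute import task_states
    from nova.compute import vm_states

    common.status_from_state(vm_states.ACTIVE)                        # 'ACTIVE'
    common.status_from_state(vm_states.ACTIVE, task_states.PASSWORD)  # 'PASSWORD'
    common.status_from_state('no-such-state')                         # 'UNKNOWN_STATE'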
--- nova/api/openstack/common.py | 6 ++-- nova/compute/manager.py | 30 ++++++++--------- nova/compute/task_state.py | 43 ------------------------- nova/compute/task_states.py | 43 +++++++++++++++++++++++++ nova/tests/api/openstack/test_server_actions.py | 1 - nova/tests/vmwareapi/db_fakes.py | 4 +-- 6 files changed, 63 insertions(+), 64 deletions(-) delete mode 100644 nova/compute/task_state.py create mode 100644 nova/compute/task_states.py diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index eae0fd916..778c1e514 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -28,7 +28,7 @@ from nova import log as logging from nova import quota from nova.api.openstack import wsgi from nova.compute import vm_states -from nova.compute import task_state +from nova.compute import task_states LOG = logging.getLogger('nova.api.openstack.common') @@ -56,9 +56,9 @@ _STATE_MAP = { } -def status_from_state(_vm_state, _task_state=None): +def status_from_state(_vm_state, task_state=None): """Given vm_state and task_state, return a status string.""" - if _vm_state == vm_states.ACTIVE and _task_state == task_state.PASSWORD: + if _vm_state == vm_states.ACTIVE and task_state == task_states.PASSWORD: return "PASSWORD" return _STATE_MAP.get(_vm_state, "UNKNOWN_STATE") diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 4be5bdd69..5a4f62b76 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -56,7 +56,7 @@ from nova import rpc from nova import utils from nova import volume from nova.compute import power_state -from nova.compute import task_state +from nova.compute import task_states from nova.compute import vm_states from nova.notifier import api as notifier from nova.compute.utils import terminate_volumes @@ -373,7 +373,7 @@ class ComputeManager(manager.SchedulerDependentManager): updates['host'] = self.host updates['launched_on'] = self.host updates['vm_state'] = vm_states.BUILD - updates['task_state'] = task_state.NETWORKING + updates['task_state'] = task_states.NETWORKING instance = self.db.instance_update(context, instance_id, updates) instance['injected_files'] = kwargs.get('injected_files', []) instance['admin_pass'] = kwargs.get('admin_password', None) @@ -398,7 +398,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.BUILD, - task_state=task_state.BLOCK_DEVICE_MAPPING) + task_state=task_states.BLOCK_DEVICE_MAPPING) (swap, ephemerals, block_device_mapping) = self._setup_block_device_mapping( @@ -411,7 +411,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - task_state=task_state.SPAWN) + task_state=task_states.SPAWN) # TODO(vish) check to make sure the availability zone matches try: @@ -524,7 +524,7 @@ class ComputeManager(manager.SchedulerDependentManager): instance_id, power_state=current_power_state, vm_state=vm_states.REBUILD, - task_state=task_state.REBUILDING) + task_state=task_states.REBUILDING) network_info = self._get_instance_nw_info(context, instance_ref) self.driver.destroy(instance_ref, network_info) @@ -532,7 +532,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.REBUILD, - task_state=task_state.BLOCK_DEVICE_MAPPING) + task_state=task_states.BLOCK_DEVICE_MAPPING) bd_mapping = self._setup_block_device_mapping(context, instance_id) @@ -545,7 +545,7 @@ class ComputeManager(manager.SchedulerDependentManager): 
self._instance_update(context, instance_id, vm_state=vm_states.REBUILD, - task_state=task_state.SPAWN) + task_state=task_states.SPAWN) self.driver.spawn(context, instance_ref, network_info, bd_mapping) @@ -578,7 +578,7 @@ class ComputeManager(manager.SchedulerDependentManager): instance_id, power_state=current_power_state, vm_state=vm_states.REBOOT, - task_state=task_state.REBOOTING) + task_state=task_states.REBOOTING) if instance_ref['power_state'] != power_state.RUNNING: state = instance_ref['power_state'] @@ -788,7 +788,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.RESCUE, - task_state=task_state.RESCUING) + task_state=task_states.RESCUING) instance_ref = self.db.instance_get(context, instance_id) network_info = self._get_instance_nw_info(context, instance_ref) @@ -800,7 +800,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.RESCUE, - task_state=task_state.RESCUED, + task_state=task_states.RESCUED, power_state=current_power_state) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @@ -813,7 +813,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.ACTIVE, - task_state=task_state.UNRESCUING) + task_state=task_states.UNRESCUING) instance_ref = self.db.instance_get(context, instance_id) network_info = self._get_instance_nw_info(context, instance_ref) @@ -1049,7 +1049,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.PAUSE, - task_state=task_state.PAUSING) + task_state=task_states.PAUSING) instance_ref = self.db.instance_get(context, instance_id) self.driver.pause(instance_ref, lambda result: None) @@ -1071,7 +1071,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.ACTIVE, - task_state=task_state.UNPAUSING) + task_state=task_states.UNPAUSING) instance_ref = self.db.instance_get(context, instance_id) self.driver.unpause(instance_ref, lambda result: None) @@ -1112,7 +1112,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.SUSPEND, - task_state=task_state.SUSPENDING) + task_state=task_states.SUSPENDING) instance_ref = self.db.instance_get(context, instance_id) self.driver.suspend(instance_ref, lambda result: None) @@ -1134,7 +1134,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.ACTIVE, - task_state=task_state.RESUMING) + task_state=task_states.RESUMING) instance_ref = self.db.instance_get(context, instance_id) self.driver.resume(instance_ref, lambda result: None) diff --git a/nova/compute/task_state.py b/nova/compute/task_state.py deleted file mode 100644 index 55466c783..000000000 --- a/nova/compute/task_state.py +++ /dev/null @@ -1,43 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Possible task states for instances""" - -BLOCK_DEVICE_MAPPING='block_device_mapping' -NETWORKING='networking' -SPAWN='spawn' - -SNAPSHOT='snapshot' -BACKUP='backup' -PASSWORD='password' - -RESIZE_PREP='resize_prep' -RESIZE_MIGRATING='resize_migrating' -RESIZE_MIGRATED='resize_migrated' -RESIZE_FINISH='resize_finish' - -REBUILDING='rebuilding' - -REBOOTING='rebooting' -PAUSING='pausing' -UNPAUSING='unpausing' -SUSPENDING='suspending' -RESUMING='resuming' - -RESCUING='rescuing' -RESCUED='rescued' -UNRESCUING='unrescuing' diff --git a/nova/compute/task_states.py b/nova/compute/task_states.py new file mode 100644 index 000000000..55466c783 --- /dev/null +++ b/nova/compute/task_states.py @@ -0,0 +1,43 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
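+
+# Task states describe the transient operation currently being performed on
+# an instance (if any); they complement the longer-lived vm_state. A task
+# state of None means no operation is in flight.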
+ +"""Possible task states for instances""" + +BLOCK_DEVICE_MAPPING='block_device_mapping' +NETWORKING='networking' +SPAWN='spawn' + +SNAPSHOT='snapshot' +BACKUP='backup' +PASSWORD='password' + +RESIZE_PREP='resize_prep' +RESIZE_MIGRATING='resize_migrating' +RESIZE_MIGRATED='resize_migrated' +RESIZE_FINISH='resize_finish' + +REBUILDING='rebuilding' + +REBOOTING='rebooting' +PAUSING='pausing' +UNPAUSING='unpausing' +SUSPENDING='suspending' +RESUMING='resuming' + +RESCUING='rescuing' +RESCUED='rescued' +UNRESCUING='unrescuing' diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 6f8be0f47..011f83773 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -13,7 +13,6 @@ from nova import flags from nova.api.openstack import create_instance_helper from nova.compute import instance_types from nova.compute import vm_states -from nova.compute import task_state import nova.db.api from nova import test from nova.tests.api.openstack import common diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py index dd38420ce..b046071c7 100644 --- a/nova/tests/vmwareapi/db_fakes.py +++ b/nova/tests/vmwareapi/db_fakes.py @@ -23,7 +23,7 @@ import time from nova import db from nova import utils -from nova.compute import task_state +from nova.compute import task_states from nova.compute import vm_states @@ -67,7 +67,7 @@ def stub_out_db_instance_api(stubs): 'kernel_id': values['kernel_id'], 'ramdisk_id': values['ramdisk_id'], 'vm_state': vm_states.BUILD, - 'task_state': task_state.SCHEDULE, + 'task_state': task_states.SCHEDULE, 'user_id': values['user_id'], 'project_id': values['project_id'], 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), -- cgit From 0ea797cd8e709d910c428234417fb179bdfd1525 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 22 Aug 2011 10:50:05 -0400 Subject: Update virt/fake to correct power state issue. --- nova/virt/fake.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/virt/fake.py b/nova/virt/fake.py index b42f4ca2f..c12ee3ab8 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -42,9 +42,9 @@ def get_connection(_): class FakeInstance(object): - def __init__(self, name, power_state): + def __init__(self, name, state): self.name = name - self.power_state = power_state + self.state = state class FakeConnection(driver.ComputeDriver): @@ -150,8 +150,8 @@ class FakeConnection(driver.ComputeDriver): """ name = instance.name - pstate = power_state.RUNNING - fake_instance = FakeInstance(name, pstate) + state = power_state.RUNNING + fake_instance = FakeInstance(name, state) self.instances[name] = fake_instance def snapshot(self, context, instance, name): @@ -325,7 +325,7 @@ class FakeConnection(driver.ComputeDriver): if instance_name not in self.instances: raise exception.InstanceNotFound(instance_id=instance_name) i = self.instances[instance_name] - return {'state': i.power_state, + return {'state': i.state, 'max_mem': 0, 'mem': 0, 'num_cpu': 2, -- cgit From a450c0f3bcc93fe3ec74939e49b109cb02624913 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 22 Aug 2011 11:09:24 -0400 Subject: Update migration number. 
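
sqlalchemy-migrate orders scripts by their numeric prefix, so the script is
renamed (otherwise unchanged) from 039 to 040 to slot in after the migration
that has since claimed 039. A minimal sketch of exercising it directly,
assuming the standard repository layout (hypothetical database URL; in
practice `nova-manage db sync` drives this):

    from migrate.versioning import api as versioning_api

    repo = 'nova/db/sqlalchemy/migrate_repo'
    url = 'mysql://nova:secret@localhost/nova'  # hypothetical

    versioning_api.upgrade(url, repo, 40)    # applies 040_update_instance_states
    versioning_api.downgrade(url, repo, 39)  # restores state/state_description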
--- .../versions/039_update_instance_states.py | 57 ---------------------- .../versions/040_update_instance_states.py | 57 ++++++++++++++++++++++ 2 files changed, 57 insertions(+), 57 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/039_update_instance_states.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/040_update_instance_states.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/039_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/039_update_instance_states.py deleted file mode 100644 index 07efbf90f..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/039_update_instance_states.py +++ /dev/null @@ -1,57 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData, Table, Column, String - -meta = MetaData() - -c_task_state = Column('task_state', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - - -def upgrade(migrate_engine): - # Upgrade operations go here. Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - c_state = instances.c.state - c_state.alter(name='power_state') - - c_vm_state = instances.c.state_description - c_vm_state.alter(name='vm_state') - - instances.create_column(c_task_state) - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - c_state = instances.c.power_state - c_state.alter(name='state') - - c_vm_state = instances.c.vm_state - c_vm_state.alter(name='state_description') - - instances.drop_column('task_state') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/040_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/040_update_instance_states.py new file mode 100644 index 000000000..07efbf90f --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/040_update_instance_states.py @@ -0,0 +1,57 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
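+
+# This migration renames instances.state to power_state and
+# instances.state_description to vm_state, and adds the new nullable
+# task_state column used by the split state model.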
+ +from sqlalchemy import MetaData, Table, Column, String + +meta = MetaData() + +c_task_state = Column('task_state', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_state = instances.c.state + c_state.alter(name='power_state') + + c_vm_state = instances.c.state_description + c_vm_state.alter(name='vm_state') + + instances.create_column(c_task_state) + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_state = instances.c.power_state + c_state.alter(name='state') + + c_vm_state = instances.c.vm_state + c_vm_state.alter(name='state_description') + + instances.drop_column('task_state') -- cgit From c2736787be23d0893e2d4aebcc2cad6fdc5c2bd1 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Mon, 22 Aug 2011 11:57:42 -0400 Subject: Fix scheduler and integrated tests. --- nova/tests/integrated/test_servers.py | 11 +++++++++-- nova/tests/scheduler/test_scheduler.py | 6 +++--- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index deeb3d008..0e3a6eefb 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -35,7 +35,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): server = self.api.get_server(server['id']) print server retries = retries + 1 - if retries > 8: + if retries > 5: break return server @@ -48,8 +48,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): def test_create_and_delete_server(self): """Creates and deletes a server.""" self.flags(stub_network=True) - # Create server + # Create server # Build the server data gradually, checking errors along the way server = {} good_server = self._build_minimal_create_server_request() @@ -184,6 +184,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): def test_create_and_rebuild_server(self): """Rebuild a server.""" + self.flags(stub_network=True) # create a server with initially has no metadata server = self._build_minimal_create_server_request() @@ -216,6 +217,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): def test_create_and_rebuild_server_with_metadata(self): """Rebuild a server with metadata.""" + self.flags(stub_network=True) # create a server with initially has no metadata server = self._build_minimal_create_server_request() @@ -225,6 +227,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): self.assertTrue(created_server['id']) created_server_id = created_server['id'] + created_server = self._wait_for_creation(created_server) + # rebuild the server with metadata post = {} post['rebuild'] = { @@ -252,6 +256,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): def test_create_and_rebuild_server_with_metadata_removal(self): """Rebuild a server with metadata.""" + self.flags(stub_network=True) # create a server with initially has no metadata server = self._build_minimal_create_server_request() @@ -268,6 +273,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): self.assertTrue(created_server['id']) created_server_id = created_server['id'] + created_server = 
self._wait_for_creation(created_server)
+
         # rebuild the server with metadata
         post = {}
         post['rebuild'] = {
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 629019eaf..a1281ae73 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -95,7 +95,7 @@ class SchedulerTestCase(test.TestCase):
         inst['vcpus'] = kwargs.get('vcpus', 1)
         inst['memory_mb'] = kwargs.get('memory_mb', 10)
         inst['local_gb'] = kwargs.get('local_gb', 20)
-        inst['vm_state'] = kwargs.get('vm_state', vm_state.ACTIVE)
+        inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
         inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
         inst['task_state'] = kwargs.get('task_state', None)
         return db.instance_create(ctxt, inst)
@@ -275,7 +275,7 @@ class SimpleDriverTestCase(test.TestCase):
         inst['memory_mb'] = kwargs.get('memory_mb', 20)
         inst['local_gb'] = kwargs.get('local_gb', 30)
         inst['launched_on'] = kwargs.get('launched_on', 'dummy')
-        inst['vm_state'] = kwargs.get('vm_state', vm_state.ACTIVE)
+        inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
         inst['task_state'] = kwargs.get('task_state', None)
         inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
         return db.instance_create(self.context, inst)['id']
@@ -669,7 +669,7 @@ class SimpleDriverTestCase(test.TestCase):
                                  block_migration=False)
 
         i_ref = db.instance_get(self.context, instance_id)
-        self.assertTrue(i_ref['vm_state'] == vm_state.MIGRATE)
+        self.assertTrue(i_ref['vm_state'] == vm_states.MIGRATE)
 
         db.instance_destroy(self.context, instance_id)
         db.volume_destroy(self.context, v_ref['id'])
-- 
cgit 

From d60f813201df345507ce0aca7bed0f8b719aabfe Mon Sep 17 00:00:00 2001
From: Dan Prince
Date: Mon, 22 Aug 2011 11:59:08 -0400
Subject: Fixes/updates to make test_cloud pass.
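
The EC2 state map gains a None entry so instances that have not been assigned
a vm_state yet report as pending. For illustration only (values taken from
the _STATE_DESCRIPTION_MAP introduced earlier in this series; the constants
are renamed in a later PEP8 pass):

    from nova.api.ec2 import cloud
    from nova.compute import vm_states

    cloud.state_description_from_vm_state(None)              # 'pending'
    cloud.state_description_from_vm_state(vm_states.BUILD)   # 'pending'
    cloud.state_description_from_vm_state(vm_states.ACTIVE)  # 'running'
    cloud.state_description_from_vm_state(vm_states.STOP)    # 'stopped'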
--- nova/api/ec2/cloud.py | 1 + nova/compute/manager.py | 2 +- nova/tests/test_cloud.py | 7 ++++--- nova/virt/fake.py | 2 +- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index b7c7d2e12..8bddf3032 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -82,6 +82,7 @@ def _gen_key(context, user_id, key_name): # EC2 API: Valid Values: # pending | running | shutting-down | terminated | stopping | stopped _STATE_DESCRIPTION_MAP = { + None: 'pending', vm_states.ACTIVE: 'running', vm_states.BUILD: 'pending', vm_states.REBUILD: 'pending', diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 5a4f62b76..75928f7ef 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1706,7 +1706,7 @@ class ComputeManager(manager.SchedulerDependentManager): if vm_instance is None: vm_power_state = power_state.NOSTATE else: - vm_power_state = vm_instance["power_state"] + vm_power_state = vm_instance.state if vm_power_state == db_power_state: continue diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index cce9514ec..4d148f39e 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -38,6 +38,7 @@ from nova import test from nova import utils from nova.api.ec2 import cloud from nova.api.ec2 import ec2utils +from nova.compute import vm_states from nova.image import fake @@ -1174,12 +1175,12 @@ class CloudTestCase(test.TestCase): def _wait_for_running(self, instance_id): def is_running(info): - return info['vm_state'] == 'running' + return info['vm_state'] == vm_states.ACTIVE self._wait_for_state(self.context, instance_id, is_running) def _wait_for_stopped(self, instance_id): def is_stopped(info): - return info['vm_state'] == 'stopped' + return info['vm_state'] == vm_states.STOP self._wait_for_state(self.context, instance_id, is_stopped) def _wait_for_terminate(self, instance_id): @@ -1562,7 +1563,7 @@ class CloudTestCase(test.TestCase): 'id': 0, 'root_device_name': '/dev/sdh', 'security_groups': [{'name': 'fake0'}, {'name': 'fake1'}], - 'vm_state': 'stopped', + 'vm_state': vm_states.STOP, 'instance_type': {'name': 'fake_type'}, 'kernel_id': 1, 'ramdisk_id': 2, diff --git a/nova/virt/fake.py b/nova/virt/fake.py index c12ee3ab8..dc0628772 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -120,7 +120,7 @@ class FakeConnection(driver.ComputeDriver): def _map_to_instance_info(self, instance): instance = utils.check_isinstance(instance, FakeInstance) - info = driver.InstanceInfo(instance.name, instance.power_state) + info = driver.InstanceInfo(instance.name, instance.state) return info def list_instances_detail(self): -- cgit From 393c9375626812ecb904d9048c833b0d110e9aa8 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 22 Aug 2011 13:04:05 -0400 Subject: Use 'vm_state' instead of 'state' in instance filters query. --- nova/db/sqlalchemy/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index a5ed2363f..3e690e094 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1239,7 +1239,7 @@ def instance_get_all_by_filters(context, filters): # Filters for exact matches that we can do along with the SQL query... 
# For other filters that don't match this, we will do regexp matching
     exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
-                                'state', 'instance_type_id', 'deleted']
+                                'vm_state', 'instance_type_id', 'deleted']
 
     query_filters = [key for key in filters.iterkeys()
                 if key in exact_match_filter_names]
-- 
cgit 

From ea3684d2a2e60f19bdea6b3117be613103a605dc Mon Sep 17 00:00:00 2001
From: Brian Lamar
Date: Mon, 22 Aug 2011 13:16:48 -0400
Subject: Fixes for a number of tests.

---
 nova/compute/api.py              | 3 +++
 nova/compute/task_states.py      | 1 +
 nova/tests/vmwareapi/db_fakes.py | 2 +-
 3 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/nova/compute/api.py b/nova/compute/api.py
index 3664be5ed..0f993015b 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -36,6 +36,7 @@ from nova import utils
 from nova import volume
 from nova.compute import instance_types
 from nova.compute import power_state
+from nova.compute import task_states
 from nova.compute import vm_states
 from nova.compute.utils import terminate_volumes
 from nova.scheduler import api as scheduler_api
@@ -397,6 +398,8 @@ class API(base.Base):
             updates['display_name'] = "Server %s" % instance_id
             instance['display_name'] = updates['display_name']
         updates['hostname'] = self.hostname_factory(instance)
+        updates['vm_state'] = vm_states.BUILD
+        updates['task_state'] = task_states.SCHEDULING
 
         instance = self.update(context, instance_id, **updates)
         return instance
diff --git a/nova/compute/task_states.py b/nova/compute/task_states.py
index 55466c783..885a30ebe 100644
--- a/nova/compute/task_states.py
+++ b/nova/compute/task_states.py
@@ -17,6 +17,7 @@
 
 """Possible task states for instances"""
 
+SCHEDULING='scheduling'
 BLOCK_DEVICE_MAPPING='block_device_mapping'
 NETWORKING='networking'
 SPAWN='spawn'
diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py
index b046071c7..b56956f96 100644
--- a/nova/tests/vmwareapi/db_fakes.py
+++ b/nova/tests/vmwareapi/db_fakes.py
@@ -67,7 +67,7 @@ def stub_out_db_instance_api(stubs):
             'kernel_id': values['kernel_id'],
             'ramdisk_id': values['ramdisk_id'],
             'vm_state': vm_states.BUILD,
-            'task_state': task_states.SCHEDULE,
+            'task_state': task_states.SCHEDULING,
             'user_id': values['user_id'],
             'project_id': values['project_id'],
             'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
-- 
cgit 

From 7726b3d763a136347f2324e630f0a3cdc60a045b Mon Sep 17 00:00:00 2001
From: Anthony Young
Date: Mon, 22 Aug 2011 14:08:03 -0700
Subject: Simple usage extension for nova.

Uses db to calculate tenant_usage for specified time periods.

Methods:

index: return a list of tenant_usages, with the option of including detailed
server_usage

show: returns a specific tenant_usage object

tenant_usage object:

tenant_usage.total_memory_mb_usage: sum of memory_mb * hours for all
instances in tenant for this period
tenant_usage.total_local_gb_usage: sum of local_gb * hours for all instances
in tenant for this period
tenant_usage.total_vcpus_usage: sum of vcpus * hours for all instances in
tenant for this period
tenant_usage.total_hours: sum of all instance hours for this period
tenant_usage.server_usages: A detailed list of server_usages, which describe
the usage of a specific server

For larger instance db tables, indexes on instance.launched_at and
instance.terminated_at should significantly help performance.
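
A worked example of the arithmetic, with hypothetical numbers: one server
whose flavor has memory_mb=1024, vcpus=2 and local_gb=10, running for an
entire 24-hour report window, contributes:

    hours = 24.0
    total_hours = hours                   # 24.0
    total_memory_mb_usage = 1024 * hours  # 24576.0
    total_vcpus_usage = 2 * hours         # 48.0
    total_local_gb_usage = 10 * hours     # 240.0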
---
 nova/api/openstack/contrib/simple_tenant_usage.py  | 268 +++++++++++++++++++++
 .../openstack/contrib/test_simple_tenant_usage.py  | 189 +++++++++++++++
 2 files changed, 457 insertions(+)
 create mode 100644 nova/api/openstack/contrib/simple_tenant_usage.py
 create mode 100644 nova/tests/api/openstack/contrib/test_simple_tenant_usage.py

diff --git a/nova/api/openstack/contrib/simple_tenant_usage.py b/nova/api/openstack/contrib/simple_tenant_usage.py
new file mode 100644
index 000000000..d578b2b67
--- /dev/null
+++ b/nova/api/openstack/contrib/simple_tenant_usage.py
@@ -0,0 +1,268 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import urlparse
+import webob
+
+from datetime import datetime
+from nova import db
+from nova import exception
+from nova import flags
+from nova.compute import instance_types
+from nova.api.openstack import extensions
+from nova.api.openstack import views
+from nova.db.sqlalchemy.session import get_session
+from webob import exc
+
+
+FLAGS = flags.FLAGS
+
+INSTANCE_FIELDS = ['id',
+                   'image_ref',
+                   'project_id',
+                   'user_id',
+                   'display_name',
+                   'state_description',
+                   'instance_type_id',
+                   'launched_at',
+                   'terminated_at']
+
+
+class SimpleTenantUsageController(object):
+
+    def _get_instances_for_time_period(self, period_start, period_stop,
+                                       tenant_id):
+        tenant_clause = ''
+        if tenant_id:
+            tenant_clause = " and project_id='%s'" % tenant_id
+
+        conn = get_session().connection()
+        rows = conn.execute("select %s from instances where \
+                            (terminated_at is NULL or terminated_at > '%s') \
+                            and (launched_at < '%s') %s" %\
+                            (','.join(INSTANCE_FIELDS),
+                            period_start.isoformat(' '),\
+                            period_stop.isoformat(' '),
+                            tenant_clause)).fetchall()
+
+        return rows
+
+    def _hours_for(self, instance, period_start, period_stop):
+        launched_at = instance['launched_at']
+        terminated_at = instance['terminated_at']
+        if terminated_at is not None:
+            if not isinstance(terminated_at, datetime):
+                terminated_at = datetime.strptime(terminated_at,
+                                                  "%Y-%m-%d %H:%M:%S.%f")
+
+        if launched_at is not None:
+            if not isinstance(launched_at, datetime):
+                launched_at = datetime.strptime(launched_at,
+                                                "%Y-%m-%d %H:%M:%S.%f")
+
+        if terminated_at and terminated_at < period_start:
+            return 0
+        # nothing if it started after the usage report ended
+        if launched_at and launched_at > period_stop:
+            return 0
+        if launched_at:
+            # charge from the later of launch time and period start
+            start = max(launched_at, period_start)
+            if terminated_at:
+                # if instance stopped before period_stop, don't charge after
+                stop = min(period_stop, terminated_at)
+            else:
+                # instance is still running, so charge them up to current time
+                stop = period_stop
+            dt = stop - start
+            seconds = dt.days * 3600 * 24 + dt.seconds\
+                      + dt.microseconds / 1000000.0
+
+            return seconds / 3600.0
+        else:
+            # instance hasn't launched, so no charge
+            return 0
+
+    def _usage_for_period(self, context, period_start,
+            period_stop, 
tenant_id=None, detailed=True):
+
+        rows = self._get_instances_for_time_period(period_start,
+                                                   period_stop,
+                                                   tenant_id)
+        rval = {}
+        flavors = {}
+
+        for row in rows:
+            info = {}
+            for i in range(len(INSTANCE_FIELDS)):
+                info[INSTANCE_FIELDS[i]] = row[i]
+            info['hours'] = self._hours_for(info, period_start, period_stop)
+            flavor_type = info['instance_type_id']
+
+            if not flavors.get(flavor_type):
+                try:
+                    flavors[flavor_type] = db.instance_type_get(context,
+                                                      info['instance_type_id'])
+                except exception.InstanceTypeNotFound:
+                    # can't bill if there is no instance type
+                    continue
+
+            flavor = flavors[flavor_type]
+
+            info['name'] = info['display_name']
+            del(info['display_name'])
+
+            info['memory_mb'] = flavor['memory_mb']
+            info['local_gb'] = flavor['local_gb']
+            info['vcpus'] = flavor['vcpus']
+
+            info['tenant_id'] = info['project_id']
+            del(info['project_id'])
+
+            info['flavor'] = flavor['name']
+            del(info['instance_type_id'])
+
+            info['started_at'] = info['launched_at']
+            del(info['launched_at'])
+
+            info['ended_at'] = info['terminated_at']
+            del(info['terminated_at'])
+
+            if info['ended_at']:
+                info['state'] = 'terminated'
+            else:
+                info['state'] = info['state_description']
+
+            del(info['state_description'])
+
+            now = datetime.utcnow()
+
+            if info['state'] == 'terminated':
+                delta = self._parse_datetime(info['ended_at'])\
+                        - self._parse_datetime(info['started_at'])
+            else:
+                delta = now - self._parse_datetime(info['started_at'])
+
+            info['uptime'] = delta.days * 24 * 3600 + delta.seconds
+
+            if not info['tenant_id'] in rval:
+                summary = {}
+                summary['tenant_id'] = info['tenant_id']
+                if detailed:
+                    summary['server_usages'] = []
+                summary['total_local_gb_usage'] = 0
+                summary['total_vcpus_usage'] = 0
+                summary['total_memory_mb_usage'] = 0
+                summary['total_hours'] = 0
+                summary['start'] = period_start
+                summary['stop'] = period_stop
+                rval[info['tenant_id']] = summary
+
+            summary = rval[info['tenant_id']]
+            summary['total_local_gb_usage'] += info['local_gb'] * info['hours']
+            summary['total_vcpus_usage'] += info['vcpus'] * info['hours']
+            summary['total_memory_mb_usage'] += info['memory_mb']\
+                                                * info['hours']
+
+            summary['total_hours'] += info['hours']
+            if detailed:
+                summary['server_usages'].append(info)
+
+        return rval.values()
+
+    def _parse_datetime(self, dtstr):
+        if isinstance(dtstr, datetime):
+            return dtstr
+        try:
+            return datetime.strptime(dtstr, "%Y-%m-%dT%H:%M:%S")
+        except:
+            try:
+                return datetime.strptime(dtstr, "%Y-%m-%dT%H:%M:%S.%f")
+            except:
+                return datetime.strptime(dtstr, "%Y-%m-%d %H:%M:%S.%f")
+
+    def _get_datetime_range(self, req):
+        qs = req.environ.get('QUERY_STRING', '')
+        env = urlparse.parse_qs(qs)
+        period_start = self._parse_datetime(env.get('start',
+                                    [datetime.utcnow().isoformat()])[0])
+        period_stop = self._parse_datetime(env.get('end',
+                                    [datetime.utcnow().isoformat()])[0])
+
+        detailed = bool(env.get('detailed', False))
+        return (period_start, period_stop, detailed)
+
+    def index(self, req):
+        """Retrieve tenant_usage for all tenants"""
+        (period_start, period_stop, detailed) = self._get_datetime_range(req)
+        context = req.environ['nova.context']
+
+        if not context.is_admin and FLAGS.allow_admin_api:
+            return webob.Response(status_int=403)
+
+        usages = self._usage_for_period(context,
+                                        period_start,
+                                        period_stop,
+                                        detailed=detailed)
+        return {'tenant_usages': usages}
+
+    def show(self, req, id):
+        """Retrieve tenant_usage for a specified tenant"""
+        (period_start, period_stop, ignore) = self._get_datetime_range(req)
+        context = 
req.environ['nova.context'] + + if not context.is_admin and FLAGS.allow_admin_api: + if id != context.project_id: + return webob.Response(status_int=403) + + usage = self._usage_for_period(context, + period_start, + period_stop, + id, + detailed=True) + if len(usage): + usage = usage[0] + else: + usage = {} + return {'tenant_usage': usage} + + +class Simple_tenant_usage(extensions.ExtensionDescriptor): + + def get_name(self): + return "Simple_tenant_usage" + + def get_alias(self): + return "os-simple-tenant-usage" + + def get_description(self): + return "Simple tenant usage extension" + + def get_namespace(self): + return "http://docs.openstack.org/ext/os-simple-tenant-usage/api/v1.1" + + def get_updated(self): + return "2011-08-19T00:00:00+00:00" + + def get_resources(self): + resources = [] + + res = extensions.ResourceExtension('os-simple-tenant-usage', + SimpleTenantUsageController()) + resources.append(res) + + return resources diff --git a/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py new file mode 100644 index 000000000..d20e36aaf --- /dev/null +++ b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py @@ -0,0 +1,189 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
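+
+# These tests stub out session.get_session() and db.instance_type_get() so
+# the extension's raw SQL query returns TENANTS * SERVERS fake rows (two
+# tenants with five servers each, each running for the full 24-hour window)
+# without touching a real database.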
+ +import datetime +import json +import webob + +from nova import context +from nova import db +from nova import flags +from nova import test +from nova.compute import instance_types +from nova.db.sqlalchemy import models +from nova.db.sqlalchemy import session +from nova.tests.api.openstack import fakes +from webob import exc + +from nova.api.openstack.contrib import simple_tenant_usage + + +FLAGS = flags.FLAGS + +SERVERS = 5 +TENANTS = 2 +HOURS = 24 +LOCAL_GB = 10 +MEMORY_MB = 1024 +VCPUS = 2 +STOP = datetime.datetime.utcnow() +START = STOP - datetime.timedelta(hours=HOURS) + + +def fake_get_session(): + class FakeFetcher(object): + def fetchall(fetcher_self): + # return 10 rows, 2 tenants, 5 servers each, each run for 1 day + return [get_fake_db_row(START, + STOP, + x, + "faketenant_%s" % (x / SERVERS)) + for x in xrange(TENANTS * SERVERS)] + + class FakeConn(object): + def execute(self, query): + return FakeFetcher() + + class FakeSession(object): + def connection(self): + return FakeConn() + + return FakeSession() + + +def fake_instance_type_get(context, instance_type_id): + return {'id': 1, + 'vcpus': VCPUS, + 'local_gb': LOCAL_GB, + 'memory_mb': MEMORY_MB, + 'name': + 'fakeflavor'} + + +def get_fake_db_row(start, end, instance_id, tenant_id): + return [instance_id, + '1', + tenant_id, + 'fakeuser', + 'name', + 'state', + 1, + start, + None] + + +class SimpleTenantUsageTest(test.TestCase): + def setUp(self): + super(SimpleTenantUsageTest, self).setUp() + self.stubs.Set(session, "get_session", + fake_get_session) + self.stubs.Set(db, "instance_type_get", + fake_instance_type_get) + self.admin_context = context.RequestContext('fakeadmin_0', + 'faketenant_0', + is_admin=True) + self.user_context = context.RequestContext('fakeadmin_0', + 'faketenant_0', + is_admin=False) + self.alt_user_context = context.RequestContext('fakeadmin_0', + 'faketenant_1', + is_admin=False) + FLAGS.allow_admin_api = True + + def test_verify_db_fields_exist_in_instance_model(self): + for field in simple_tenant_usage.INSTANCE_FIELDS: + self.assertTrue(field in models.Instance.__table__.columns) + + def test_verify_index(self): + req = webob.Request.blank( + '/v1.1/os-simple-tenant-usage?start=%s&end=%s' % + (START.isoformat(), STOP.isoformat())) + req.method = "GET" + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.admin_context)) + + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + usages = res_dict['tenant_usages'] + for i in xrange(TENANTS): + self.assertEqual(int(usages[i]['total_hours']), + SERVERS * HOURS) + self.assertEqual(int(usages[i]['total_local_gb_usage']), + SERVERS * LOCAL_GB * HOURS) + self.assertEqual(int(usages[i]['total_memory_mb_usage']), + SERVERS * MEMORY_MB * HOURS) + self.assertEqual(int(usages[i]['total_vcpus_usage']), + SERVERS * VCPUS * HOURS) + self.assertFalse(usages[i].get('server_usages')) + + def test_verify_detailed_index(self): + req = webob.Request.blank( + '/v1.1/os-simple-tenant-usage?detailed=1&start=%s&end=%s' % + (START.isoformat(), STOP.isoformat())) + req.method = "GET" + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.admin_context)) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + usages = res_dict['tenant_usages'] + for i in xrange(TENANTS): + servers = usages[i]['server_usages'] + for j in xrange(SERVERS): + self.assertEqual(int(servers[j]['hours']), HOURS) + + def 
test_verify_index_fails_for_nonadmin(self):
+        req = webob.Request.blank(
+                    '/v1.1/os-simple-tenant-usage?detailed=1&start=%s&end=%s' %
+                    (START.isoformat(), STOP.isoformat()))
+        req.method = "GET"
+        req.headers["content-type"] = "application/json"
+
+        res = req.get_response(fakes.wsgi_app())
+        self.assertEqual(res.status_int, 403)
+
+    def test_verify_show(self):
+        req = webob.Request.blank(
+                  '/v1.1/os-simple-tenant-usage/faketenant_0?start=%s&end=%s' %
+                  (START.isoformat(), STOP.isoformat()))
+        req.method = "GET"
+        req.headers["content-type"] = "application/json"
+
+        res = req.get_response(fakes.wsgi_app(
+                               fake_auth_context=self.user_context))
+        self.assertEqual(res.status_int, 200)
+        res_dict = json.loads(res.body)
+
+        usage = res_dict['tenant_usage']
+        servers = usage['server_usages']
+        self.assertEqual(len(usage['server_usages']), SERVERS)
+        for j in xrange(SERVERS):
+            self.assertEqual(int(servers[j]['hours']), HOURS)
+
+    def test_verify_show_cant_view_other_tenant(self):
+        req = webob.Request.blank(
+                  '/v1.1/os-simple-tenant-usage/faketenant_0?start=%s&end=%s' %
+                  (START.isoformat(), STOP.isoformat()))
+        req.method = "GET"
+        req.headers["content-type"] = "application/json"
+
+        res = req.get_response(fakes.wsgi_app(
+                               fake_auth_context=self.alt_user_context))
+        self.assertEqual(res.status_int, 403)
-- 
cgit 

From 4b0944731a25d3cfcd30358619376dedf2251701 Mon Sep 17 00:00:00 2001
From: Anthony Young
Date: Mon, 22 Aug 2011 14:31:26 -0700
Subject: some readability fixes per ja feedback

---
 nova/api/openstack/contrib/simple_tenant_usage.py | 29 ++++++++++++-----------
 1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/nova/api/openstack/contrib/simple_tenant_usage.py b/nova/api/openstack/contrib/simple_tenant_usage.py
index d578b2b67..80d819365 100644
--- a/nova/api/openstack/contrib/simple_tenant_usage.py
+++ b/nova/api/openstack/contrib/simple_tenant_usage.py
@@ -97,8 +97,8 @@ class SimpleTenantUsageController(object):
             # instance hasn't launched, so no charge
             return 0
 
-    def _usage_for_period(self, context, period_start,
-            period_stop, tenant_id=None, detailed=True):
+    def _tenant_usages_for_period(self, context, period_start,
+                                  period_stop, tenant_id=None, detailed=True):
 
         rows = self._get_instances_for_time_period(period_start,
                                                    period_stop,
@@ -208,32 +208,33 @@ class SimpleTenantUsageController(object):
 
     def index(self, req):
         """Retrieve tenant_usage for all tenants"""
-        (period_start, period_stop, detailed) = self._get_datetime_range(req)
         context = req.environ['nova.context']
 
         if not context.is_admin and FLAGS.allow_admin_api:
             return webob.Response(status_int=403)
 
-        usages = self._usage_for_period(context,
-                                        period_start,
-                                        period_stop,
-                                        detailed=detailed)
+        (period_start, period_stop, detailed) = self._get_datetime_range(req)
+        usages = self._tenant_usages_for_period(context,
+                                                period_start,
+                                                period_stop,
+                                                detailed=detailed)
         return {'tenant_usages': usages}
 
     def show(self, req, id):
         """Retrieve tenant_usage for a specified tenant"""
-        (period_start, period_stop, ignore) = self._get_datetime_range(req)
+        tenant_id = id
         context = req.environ['nova.context']
 
         if not context.is_admin and FLAGS.allow_admin_api:
-            if id != context.project_id:
+            if tenant_id != context.project_id:
                 return webob.Response(status_int=403)
 
-        usage = self._usage_for_period(context,
-                                       period_start,
-                                       period_stop,
-                                       id,
-                                       detailed=True)
+        (period_start, period_stop, ignore) = self._get_datetime_range(req)
+        usage = self._tenant_usages_for_period(context,
+                                               period_start,
+                                               period_stop,
+                                               tenant_id=tenant_id,
+                                               
detailed=True) if len(usage): usage = usage[0] else: -- cgit From de0a17310e7228aa96263243851a89fb016f9730 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 22 Aug 2011 15:21:31 -0700 Subject: remove extra spaces --- nova/api/openstack/contrib/simple_tenant_usage.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/api/openstack/contrib/simple_tenant_usage.py b/nova/api/openstack/contrib/simple_tenant_usage.py index 80d819365..5f4218237 100644 --- a/nova/api/openstack/contrib/simple_tenant_usage.py +++ b/nova/api/openstack/contrib/simple_tenant_usage.py @@ -43,7 +43,6 @@ INSTANCE_FIELDS = ['id', class SimpleTenantUsageController(object): - def _get_instances_for_time_period(self, period_start, period_stop, tenant_id): tenant_clause = '' @@ -243,7 +242,6 @@ class SimpleTenantUsageController(object): class Simple_tenant_usage(extensions.ExtensionDescriptor): - def get_name(self): return "Simple_tenant_usage" -- cgit From 2fbaac5e07b558d7829253915523f073b07e24d4 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Tue, 23 Aug 2011 10:57:47 -0400 Subject: PEP8 fixes --- nova/api/ec2/cloud.py | 18 ++++++++----- nova/compute/task_states.py | 63 ++++++++++++++++++++++----------------------- nova/compute/vm_states.py | 24 ++++++++--------- nova/tests/test_compute.py | 15 ++++++----- 4 files changed, 63 insertions(+), 57 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index a7a343938..c5a360426 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -93,7 +93,7 @@ _STATE_DESCRIPTION_MAP = { vm_states.VERIFY_RESIZE: 'verify_resize', vm_states.PAUSED: 'pause', vm_states.SUSPENDED: 'suspend', - vm_states.RESCUED: 'rescue' + vm_states.RESCUED: 'rescue', } @@ -104,10 +104,12 @@ def state_description_from_vm_state(vm_state): # TODO(yamahata): hypervisor dependent default device name _DEFAULT_ROOT_DEVICE_NAME = '/dev/sda1' -_DEFAULT_MAPPINGS = {'ami': 'sda1', - 'ephemeral0': 'sda2', - 'root': _DEFAULT_ROOT_DEVICE_NAME, - 'swap': 'sda3'} +_DEFAULT_MAPPINGS = { + 'ami': 'sda1', + 'ephemeral0': 'sda2', + 'root': _DEFAULT_ROOT_DEVICE_NAME, + 'swap': 'sda3', +} def _parse_block_device_mapping(bdm): @@ -1064,8 +1066,10 @@ class CloudController(object): def _format_attr_instance_initiated_shutdown_behavior(instance, result): vm_state = instance['vm_state'] - state_to_value = {vm_states.STOPPED: 'stop', - vm_states.DELETED: 'terminate'} + state_to_value = { + vm_states.STOPPED: 'stop', + vm_states.DELETED: 'terminate', + } value = state_to_value.get(vm_state) if value: result['instanceInitiatedShutdownBehavior'] = value diff --git a/nova/compute/task_states.py b/nova/compute/task_states.py index d186edc3f..5f78495ea 100644 --- a/nova/compute/task_states.py +++ b/nova/compute/task_states.py @@ -17,35 +17,34 @@ """Possible task states for instances""" -SCHEDULING='scheduling' -BLOCK_DEVICE_MAPPING='block_device_mapping' -NETWORKING='networking' -SPAWN='spawn' - -SNAPSHOTTING='snapshotting' -BACKING_UP='backing_up' -PASSWORD='password' - -RESIZE_PREP='resize_prep' -RESIZE_MIGRATING='resize_migrating' -RESIZE_MIGRATED='resize_migrated' -RESIZE_FINISH='resize_finish' -RESIZE_REVERTING='resize_reverting' -RESIZE_CONFIRMING='resize_confirming' - -REBUILDING='rebuilding' - -REBOOTING='rebooting' -HARD_REBOOTING='hard_rebooting' -PAUSING='pausing' -UNPAUSING='unpausing' -SUSPENDING='suspending' -RESUMING='resuming' - -RESCUING='rescuing' -RESCUED='rescued' -UNRESCUING='unrescuing' - -DELETING='deleting' -STOPPING='stopping' -STARTING='starting' +SCHEDULING = 'scheduling' 
+BLOCK_DEVICE_MAPPING = 'block_device_mapping' +NETWORKING = 'networking' +SPAWN = 'spawn' + +SNAPSHOTTING = 'snapshotting' +BACKING_UP = 'backing_up' +PASSWORD = 'password' + +RESIZE_PREP = 'resize_prep' +RESIZE_MIGRATING = 'resize_migrating' +RESIZE_MIGRATED = 'resize_migrated' +RESIZE_FINISH = 'resize_finish' +RESIZE_REVERTING = 'resize_reverting' +RESIZE_CONFIRMING = 'resize_confirming' + +REBUILDING = 'rebuilding' + +REBOOTING = 'rebooting' +HARD_REBOOTING = 'hard_rebooting' +PAUSING = 'pausing' +UNPAUSING = 'unpausing' +SUSPENDING = 'suspending' +RESUMING = 'resuming' + +RESCUING = 'rescuing' +UNRESCUING = 'unrescuing' + +DELETING = 'deleting' +STOPPING = 'stopping' +STARTING = 'starting' diff --git a/nova/compute/vm_states.py b/nova/compute/vm_states.py index 342fa905e..560e6d688 100644 --- a/nova/compute/vm_states.py +++ b/nova/compute/vm_states.py @@ -17,18 +17,18 @@ """Possible vm states for instances""" -ACTIVE='active' -BUILDING='building' -REBUILDING='rebuilding' +ACTIVE = 'active' +BUILDING = 'building' +REBUILDING = 'rebuilding' -PAUSED='paused' -SUSPENDED='suspended' -RESCUED='rescued' -DELETED='deleted' -STOPPED='stopped' +PAUSED = 'paused' +SUSPENDED = 'suspended' +RESCUED = 'rescued' +DELETED = 'deleted' +STOPPED = 'stopped' -MIGRATING='migrating' -RESIZING='resizing' -VERIFY_RESIZE='verify_resize' +MIGRATING = 'migrating' +RESIZING = 'resizing' +VERIFY_RESIZE = 'verify_resize' -ERROR='error' +ERROR = 'error' diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 2cf694d2c..11e1fd540 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -1308,14 +1308,17 @@ class ComputeTestCase(test.TestCase): """Test searching instances by state""" c = context.get_admin_context() - instance_id1 = self._create_instance({'power_state': power_state.SHUTDOWN}) + instance_id1 = self._create_instance({ + 'power_state': power_state.SHUTDOWN, + }) instance_id2 = self._create_instance({ - 'id': 2, - 'power_state': power_state.RUNNING}) + 'id': 2, + 'power_state': power_state.RUNNING, + }) instance_id3 = self._create_instance({ - 'id': 10, - 'power_state': power_state.RUNNING}) - + 'id': 10, + 'power_state': power_state.RUNNING, + }) instances = self.compute_api.get_all(c, search_opts={'power_state': power_state.SUSPENDED}) self.assertEqual(len(instances), 0) -- cgit From 657e58113d481d5c03cb3395cd714846434675f0 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Tue, 23 Aug 2011 11:01:57 -0400 Subject: Updated migration number. --- .../versions/040_update_instance_states.py | 57 ---------------------- .../versions/042_update_instance_states.py | 57 ++++++++++++++++++++++ 2 files changed, 57 insertions(+), 57 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/040_update_instance_states.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/040_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/040_update_instance_states.py deleted file mode 100644 index 07efbf90f..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/040_update_instance_states.py +++ /dev/null @@ -1,57 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData, Table, Column, String - -meta = MetaData() - -c_task_state = Column('task_state', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - - -def upgrade(migrate_engine): - # Upgrade operations go here. Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - c_state = instances.c.state - c_state.alter(name='power_state') - - c_vm_state = instances.c.state_description - c_vm_state.alter(name='vm_state') - - instances.create_column(c_task_state) - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - c_state = instances.c.power_state - c_state.alter(name='state') - - c_vm_state = instances.c.vm_state - c_vm_state.alter(name='state_description') - - instances.drop_column('task_state') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py new file mode 100644 index 000000000..07efbf90f --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py @@ -0,0 +1,57 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table, Column, String + +meta = MetaData() + +c_task_state = Column('task_state', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_state = instances.c.state + c_state.alter(name='power_state') + + c_vm_state = instances.c.state_description + c_vm_state.alter(name='vm_state') + + instances.create_column(c_task_state) + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_state = instances.c.power_state + c_state.alter(name='state') + + c_vm_state = instances.c.vm_state + c_vm_state.alter(name='state_description') + + instances.drop_column('task_state') -- cgit From da02fc6e4191bdbbb2015b78f9c3fe5045bb0460 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 23 Aug 2011 15:14:09 -0700 Subject: Fix not found exceptions to properly use ec2_ips for not found --- nova/api/ec2/__init__.py | 10 ++++++---- nova/exception.py | 1 + 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 5430f443d..363dad7cd 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -392,17 +392,19 @@ class Executor(wsgi.Application): except exception.InstanceNotFound as ex: LOG.info(_('InstanceNotFound raised: %s'), unicode(ex), context=context) - return self._error(req, context, type(ex).__name__, ex.message) + ec2_id = ec2utils.id_to_ec2_id(ex.kwargs['instance_id']) + message = ex.message % {'instance_id': ec2_id} + return self._error(req, context, type(ex).__name__, message) except exception.VolumeNotFound as ex: LOG.info(_('VolumeNotFound raised: %s'), unicode(ex), context=context) - ec2_id = ec2utils.id_to_ec2_vol_id(ex.volume_id) - message = _('Volume %s not found') % ec2_id + ec2_id = ec2utils.id_to_ec2_vol_id(ex.kwargs['volume_id']) + message = ex.message % {'volume_id': ec2_id} return self._error(req, context, type(ex).__name__, message) except exception.SnapshotNotFound as ex: LOG.info(_('SnapshotNotFound raised: %s'), unicode(ex), context=context) - ec2_id = ec2utils.id_to_ec2_snap_id(ex.snapshot_id) + ec2_id = ec2utils.id_to_ec2_snap_id(ex.kwargs['snapshot_id']) message = _('Snapshot %s not found') % ec2_id return self._error(req, context, type(ex).__name__, message) except exception.NotFound as ex: diff --git a/nova/exception.py b/nova/exception.py index 66740019b..5b86059d8 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -146,6 +146,7 @@ class NovaException(Exception): message = _("An unknown exception occurred.") def __init__(self, **kwargs): + self.kwargs = kwargs try: self._error_string = self.message % kwargs -- cgit From 8b3ac90bd53ec81e6669c6169969e1e8da3e2d4f Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 10:59:12 -0400 Subject: Commit with test data in migration. 
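
The core of the change is the bidirectional translation between the old
single state_description string and the new (vm_state, task_state) pair.
Reading from the translation tables in the diff below:

    _upgrade_translations['stopping']
    # -> {'vm_state': vm_states.ACTIVE, 'task_state': task_states.STOPPING}

    _downgrade_translations[vm_states.ACTIVE][task_states.STOPPING]
    # -> 'stopping'

The _insert_test_data() call, the verbose logging and the trailing raise are
temporary scaffolding for eyeballing the translation; the follow-up commit
drops them again.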
--- nova/compute/api.py | 1 - .../versions/042_update_instance_states.py | 165 ++++++++++++++++++--- nova/exception.py | 3 + 3 files changed, 149 insertions(+), 20 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index a373bed90..eeb8f47d9 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -761,7 +761,6 @@ class API(base.Base): self.update(context, instance_id, - vm_state=vm_states.ACTIVE, task_state=task_states.DELETING) host = instance['host'] diff --git a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py index 07efbf90f..e27b84176 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py @@ -14,44 +14,171 @@ # License for the specific language governing permissions and limitations # under the License. +import sqlalchemy from sqlalchemy import MetaData, Table, Column, String +from nova import log +from nova.compute import task_states +from nova.compute import vm_states +from nova.db.sqlalchemy import models + + +LOG = log.getLogger("farts") meta = MetaData() c_task_state = Column('task_state', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + +_upgrade_translations = { + "stopping": { + "vm_state": vm_states.ACTIVE, + "task_state": task_states.STOPPING, + }, + "stopped": { + "vm_state": vm_states.STOPPED, + "task_state": None, + }, + "terminated": { + "vm_state": vm_states.DELETED, + "task_state": None, + }, + "terminating": { + "vm_state": vm_states.ACTIVE, + "task_state": task_states.DELETING, + }, + "running": { + "vm_state": vm_states.ACTIVE, + "task_state": None, + }, + "scheduling": { + "vm_state": vm_states.BUILDING, + "task_state": task_states.SCHEDULING, + }, + "migrating": { + "vm_state": vm_states.MIGRATING, + "task_state": None, + }, + "pending": { + "vm_state": vm_states.BUILDING, + "task_state": task_states.SCHEDULING, + }, +} + + +_downgrade_translations = { + vm_states.ACTIVE: { + None: "running", + task_states.DELETING: "terminating", + task_states.STOPPING: "stopping", + }, + vm_states.BUILDING: { + None: "pending", + task_states.SCHEDULING: "scheduling", + }, + vm_states.STOPPED: { + None: "stopped", + }, + vm_states.REBUILDING: { + None: "pending", + }, + vm_states.DELETED: { + None: "terminated", + }, + vm_states.MIGRATING: { + None: "migrating", + }, +} + + +def _insert_test_data(instance_table): + running_instance = models.Instance() + running_instance.state_description = "running" + stopped_instance = models.Instance() + stopped_instance.state_description = "stopped" + terminated_instance = models.Instance() + terminated_instance.state_description = "terminated" + migrating_instance = models.Instance() + migrating_instance.state_description = "migrating" + scheduling_instance = models.Instance() + scheduling_instance.state_description = "scheduling" + bad_instance = models.Instance() + bad_instance.state_description = "bad_state_description" + + instance_table.insert(running_instance).execute() + instance_table.insert(stopped_instance).execute() + instance_table.insert(terminated_instance).execute() + instance_table.insert(migrating_instance).execute() + instance_table.insert(scheduling_instance).execute() + 
instance_table.insert(bad_instance).execute() def upgrade(migrate_engine): - # Upgrade operations go here. Don't create your own engine; - # bind migrate_engine to your metadata + #migrate_engine.echo = True meta.bind = migrate_engine - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - c_state = instances.c.state + instance_table = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + _insert_test_data(instance_table) + for instance in instance_table.select().execute(): + LOG.info(instance) + c_state = instance_table.c.state c_state.alter(name='power_state') - c_vm_state = instances.c.state_description + c_vm_state = instance_table.c.state_description c_vm_state.alter(name='vm_state') - instances.create_column(c_task_state) + instance_table.create_column(c_task_state) + for old_state, values in _upgrade_translations.iteritems(): + new_values = { + "old_state": old_state, + "vm_state": values["vm_state"], + "task_state": values["task_state"], + } -def downgrade(migrate_engine): - meta.bind = migrate_engine + update = sqlalchemy.text("UPDATE instances SET task_state=:task_state " + "WHERE vm_state=:old_state") + migrate_engine.execute(update, **new_values) + + update = sqlalchemy.text("UPDATE instances SET vm_state=:vm_state " + "WHERE vm_state=:old_state") + migrate_engine.execute(update, **new_values) - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) + for instance in instance_table.select().execute(): + LOG.info(instance) - c_state = instances.c.power_state + meta.bind = migrate_engine + + instance_table = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + for old_vm_state, old_task_states in _downgrade_translations.iteritems(): + for old_task_state, new_state_desc in old_task_states.iteritems(): + if old_task_state: + update = sqlalchemy.text("UPDATE instances " + "SET vm_state=:new_state_desc " + "WHERE task_state=:old_task_state " + "AND vm_state=:old_vm_state") + migrate_engine.execute(update, locals()) + else: + update = sqlalchemy.text("UPDATE instances " + "SET vm_state=:new_state_desc " + "WHERE vm_state=:old_vm_state") + migrate_engine.execute(update, locals()) + + #c_state = instance_table.c.power_state c_state.alter(name='state') - c_vm_state = instances.c.vm_state + #c_vm_state = instance_table.c.vm_state c_vm_state.alter(name='state_description') - instances.drop_column('task_state') + instance_table.drop_column('task_state') + + for instance in instance_table.select().execute(): + LOG.info(instance) + + raise Exception() diff --git a/nova/exception.py b/nova/exception.py index 44af8177e..66740019b 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -318,6 +318,9 @@ class InvalidEc2Id(Invalid): class NotFound(NovaException): message = _("Resource could not be found.") + def __init__(self, *args, **kwargs): + super(NotFound, self).__init__(**kwargs) + class FlagNotSet(NotFound): message = _("Required flag %(flag)s not set.") -- cgit From df77c6c168d4370ec582ffbccd43e3b9cb551b98 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 11:00:21 -0400 Subject: Commit without test data in migration. 
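The migration above drives its state rewrites through sqlalchemy.text() with named bind parameters. A minimal sketch of that pattern against an in-memory SQLite database (table trimmed to one column; uses the implicit-execution engine API of this era of SQLAlchemy, later removed in 2.0):

    import sqlalchemy

    engine = sqlalchemy.create_engine('sqlite://')
    engine.execute('CREATE TABLE instances (state_description TEXT)')
    engine.execute("INSERT INTO instances VALUES ('terminating')")

    # Named bind parameters keep the values out of the SQL string itself.
    update = sqlalchemy.text('UPDATE instances SET state_description=:new '
                             'WHERE state_description=:old')
    engine.execute(update, new='active', old='terminating')

    print(engine.execute('SELECT state_description '
                         'FROM instances').fetchall())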
--- .../versions/042_update_instance_states.py | 43 +++------------------- 1 file changed, 5 insertions(+), 38 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py index e27b84176..10704d0da 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py @@ -17,15 +17,13 @@ import sqlalchemy from sqlalchemy import MetaData, Table, Column, String -from nova import log from nova.compute import task_states from nova.compute import vm_states -from nova.db.sqlalchemy import models -LOG = log.getLogger("farts") meta = MetaData() + c_task_state = Column('task_state', String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, @@ -94,37 +92,12 @@ _downgrade_translations = { } -def _insert_test_data(instance_table): - running_instance = models.Instance() - running_instance.state_description = "running" - stopped_instance = models.Instance() - stopped_instance.state_description = "stopped" - terminated_instance = models.Instance() - terminated_instance.state_description = "terminated" - migrating_instance = models.Instance() - migrating_instance.state_description = "migrating" - scheduling_instance = models.Instance() - scheduling_instance.state_description = "scheduling" - bad_instance = models.Instance() - bad_instance.state_description = "bad_state_description" - - instance_table.insert(running_instance).execute() - instance_table.insert(stopped_instance).execute() - instance_table.insert(terminated_instance).execute() - instance_table.insert(migrating_instance).execute() - instance_table.insert(scheduling_instance).execute() - instance_table.insert(bad_instance).execute() - - def upgrade(migrate_engine): - #migrate_engine.echo = True meta.bind = migrate_engine instance_table = Table('instances', meta, autoload=True, autoload_with=migrate_engine) - _insert_test_data(instance_table) - for instance in instance_table.select().execute(): - LOG.info(instance) + c_state = instance_table.c.state c_state.alter(name='power_state') @@ -148,9 +121,8 @@ def upgrade(migrate_engine): "WHERE vm_state=:old_state") migrate_engine.execute(update, **new_values) - for instance in instance_table.select().execute(): - LOG.info(instance) +def downgrade(migrate_engine): meta.bind = migrate_engine instance_table = Table('instances', meta, autoload=True, @@ -170,15 +142,10 @@ def upgrade(migrate_engine): "WHERE vm_state=:old_vm_state") migrate_engine.execute(update, locals()) - #c_state = instance_table.c.power_state + c_state = instance_table.c.power_state c_state.alter(name='state') - #c_vm_state = instance_table.c.vm_state + c_vm_state = instance_table.c.vm_state c_vm_state.alter(name='state_description') instance_table.drop_column('task_state') - - for instance in instance_table.select().execute(): - LOG.info(instance) - - raise Exception() -- cgit From 1ee1bda6cd164bd1e3cc400838830a747371ce9e Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 11:27:02 -0400 Subject: Conversion to SQLAlchemy-style. 
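The conversion below swaps those textual UPDATE statements for SQLAlchemy expression-language constructs; the two forms are roughly equivalent. A sketch of both, assuming a reflected instances table and the old bound-metadata style that makes the migration's implicit .execute() work:

    from sqlalchemy import MetaData, Table, create_engine, text

    engine = create_engine('sqlite://')
    engine.execute('CREATE TABLE instances (vm_state TEXT)')
    engine.execute("INSERT INTO instances VALUES ('running')")

    meta = MetaData(bind=engine)
    instances = Table('instances', meta, autoload=True)

    # Textual form, as in the previous revision:
    engine.execute(text('UPDATE instances SET vm_state=:new '
                        'WHERE vm_state=:old'),
                   new='active', old='running')

    # Expression-language form, as adopted below:
    instances.update().\
        values(vm_state='stopped').\
        where(instances.c.vm_state == 'active').\
        execute()

    print(engine.execute('SELECT vm_state FROM instances').fetchall())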
--- .../versions/042_update_instance_states.py | 55 +++++++++------------- 1 file changed, 21 insertions(+), 34 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py index 10704d0da..1005ee8a4 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py @@ -33,35 +33,35 @@ c_task_state = Column('task_state', _upgrade_translations = { "stopping": { - "vm_state": vm_states.ACTIVE, + "state_description": vm_states.ACTIVE, "task_state": task_states.STOPPING, }, "stopped": { - "vm_state": vm_states.STOPPED, + "state_description": vm_states.STOPPED, "task_state": None, }, "terminated": { - "vm_state": vm_states.DELETED, + "state_description": vm_states.DELETED, "task_state": None, }, "terminating": { - "vm_state": vm_states.ACTIVE, + "state_description": vm_states.ACTIVE, "task_state": task_states.DELETING, }, "running": { - "vm_state": vm_states.ACTIVE, + "state_description": vm_states.ACTIVE, "task_state": None, }, "scheduling": { - "vm_state": vm_states.BUILDING, + "state_description": vm_states.BUILDING, "task_state": task_states.SCHEDULING, }, "migrating": { - "vm_state": vm_states.MIGRATING, + "state_description": vm_states.MIGRATING, "task_state": None, }, "pending": { - "vm_state": vm_states.BUILDING, + "state_description": vm_states.BUILDING, "task_state": task_states.SCHEDULING, }, } @@ -107,19 +107,10 @@ def upgrade(migrate_engine): instance_table.create_column(c_task_state) for old_state, values in _upgrade_translations.iteritems(): - new_values = { - "old_state": old_state, - "vm_state": values["vm_state"], - "task_state": values["task_state"], - } - - update = sqlalchemy.text("UPDATE instances SET task_state=:task_state " - "WHERE vm_state=:old_state") - migrate_engine.execute(update, **new_values) - - update = sqlalchemy.text("UPDATE instances SET vm_state=:vm_state " - "WHERE vm_state=:old_state") - migrate_engine.execute(update, **new_values) + instance_table.update().\ + values(**values).\ + where(c_vm_state == old_state).\ + execute() def downgrade(migrate_engine): @@ -128,19 +119,7 @@ def downgrade(migrate_engine): instance_table = Table('instances', meta, autoload=True, autoload_with=migrate_engine) - for old_vm_state, old_task_states in _downgrade_translations.iteritems(): - for old_task_state, new_state_desc in old_task_states.iteritems(): - if old_task_state: - update = sqlalchemy.text("UPDATE instances " - "SET vm_state=:new_state_desc " - "WHERE task_state=:old_task_state " - "AND vm_state=:old_vm_state") - migrate_engine.execute(update, locals()) - else: - update = sqlalchemy.text("UPDATE instances " - "SET vm_state=:new_state_desc " - "WHERE vm_state=:old_vm_state") - migrate_engine.execute(update, locals()) + c_task_state = instance_table.c.task_state c_state = instance_table.c.power_state c_state.alter(name='state') @@ -148,4 +127,12 @@ def downgrade(migrate_engine): c_vm_state = instance_table.c.vm_state c_vm_state.alter(name='state_description') + for old_vm_state, old_task_states in _downgrade_translations.iteritems(): + for old_task_state, new_state_desc in old_task_states.iteritems(): + instance_table.update().\ + where(c_task_state == old_task_state).\ + where(c_vm_state == old_vm_state).\ + values(state_description=new_state_desc).\ + execute() + instance_table.drop_column('task_state') -- cgit From 53b0a2ea13e148fc5f461211ca9056b30db6c43d Mon Sep 
17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 11:32:58 -0400 Subject: Fix for migrations. --- .../versions/042_update_instance_states.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py index 1005ee8a4..65bdf601d 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py @@ -33,35 +33,35 @@ c_task_state = Column('task_state', _upgrade_translations = { "stopping": { - "state_description": vm_states.ACTIVE, + "vm_state": vm_states.ACTIVE, "task_state": task_states.STOPPING, }, "stopped": { - "state_description": vm_states.STOPPED, + "vm_state": vm_states.STOPPED, "task_state": None, }, "terminated": { - "state_description": vm_states.DELETED, + "vm_state": vm_states.DELETED, "task_state": None, }, "terminating": { - "state_description": vm_states.ACTIVE, + "vm_state": vm_states.ACTIVE, "task_state": task_states.DELETING, }, "running": { - "state_description": vm_states.ACTIVE, + "vm_state": vm_states.ACTIVE, "task_state": None, }, "scheduling": { - "state_description": vm_states.BUILDING, + "vm_state": vm_states.BUILDING, "task_state": task_states.SCHEDULING, }, "migrating": { - "state_description": vm_states.MIGRATING, + "vm_state": vm_states.MIGRATING, "task_state": None, }, "pending": { - "state_description": vm_states.BUILDING, + "vm_state": vm_states.BUILDING, "task_state": task_states.SCHEDULING, }, } @@ -106,6 +106,9 @@ def upgrade(migrate_engine): instance_table.create_column(c_task_state) + instance_table = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + for old_state, values in _upgrade_translations.iteritems(): instance_table.update().\ values(**values).\ -- cgit From 007efcab4b668e7a4b1d26ff274693824f6d7445 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 12:26:05 -0400 Subject: Attempt to fix issue when deleting an instance when it's still in BUILD. --- nova/compute/manager.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 0a1dc13be..802d141ef 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -429,10 +429,15 @@ class ComputeManager(manager.SchedulerDependentManager): return current_power_state = self._get_power_state(context, instance) + if current_power_state == power_state.RUNNING: + vm_state = vm_states.ACTIVE + else: + vm_state = vm_states.BUILDING + self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_states.ACTIVE, + vm_state=vm_state, task_state=None, launched_at=utils.utcnow()) -- cgit From ca8b3c7635208ab0776f51661708ecea1bfc222a Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 12:42:42 -0400 Subject: Fixed rebuild naming issue and reverted other fix which didn't fix anything. 
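On the "Fix for migrations" commit above, which autoloads the instances table a second time after create_column(): a Table reflected before a schema change does not see columns added afterwards, so the updates would otherwise run against a stale description of the table. A standalone sketch of the effect (a plain ALTER TABLE stands in for sqlalchemy-migrate's create_column):

    from sqlalchemy import MetaData, Table, create_engine

    engine = create_engine('sqlite://')
    engine.execute('CREATE TABLE instances (vm_state TEXT)')

    instances = Table('instances', MetaData(bind=engine), autoload=True)
    engine.execute('ALTER TABLE instances ADD COLUMN task_state TEXT')
    print('task_state' in instances.c)   # False: reflection is stale

    instances = Table('instances', MetaData(bind=engine), autoload=True)
    print('task_state' in instances.c)   # True: re-reflected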
--- nova/compute/api.py | 2 ++ nova/compute/manager.py | 7 +------ 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index eeb8f47d9..02bae7262 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1045,6 +1045,8 @@ class API(base.Base): metadata=None, files_to_inject=None): """Rebuild the given instance with the provided metadata.""" instance = db.api.instance_get(context, instance_id) + name = name or instance["display_name"] + invalid_rebuild_states = [ vm_states.BUILDING, vm_states.REBUILDING, diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 802d141ef..0a1dc13be 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -429,15 +429,10 @@ class ComputeManager(manager.SchedulerDependentManager): return current_power_state = self._get_power_state(context, instance) - if current_power_state == power_state.RUNNING: - vm_state = vm_states.ACTIVE - else: - vm_state = vm_states.BUILDING - self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_state, + vm_state=vm_states.ACTIVE, task_state=None, launched_at=utils.utcnow()) -- cgit From 64f946a6a0a6e08d7046ab98776928abe24f8d93 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 13:01:20 -0400 Subject: Fix for trying rebuilds when instance is not active. --- nova/api/openstack/servers.py | 10 ++++------ nova/compute/api.py | 11 +++-------- nova/exception.py | 2 +- 3 files changed, 8 insertions(+), 15 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 4ff9264a6..c5fdda1cf 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -604,9 +604,8 @@ class ControllerV10(Controller): try: self.compute_api.rebuild(context, instance_id, image_id) - except exception.BuildInProgress: - msg = _("Instance %s is currently being rebuilt.") % instance_id - LOG.debug(msg) + except exception.RebuildRequiresActiveInstance: + msg = _("Instance %s must be active to rebuild.") % instance_id raise exc.HTTPConflict(explanation=msg) return webob.Response(status_int=202) @@ -742,9 +741,8 @@ class ControllerV11(Controller): try: self.compute_api.rebuild(context, instance_id, image_href, name, metadata, personalities) - except exception.BuildInProgress: - msg = _("Instance %s is currently being rebuilt.") % instance_id - LOG.debug(msg) + except exception.RebuildRequiresActiveInstance: + msg = _("Instance %s must be active to rebuild.") % instance_id raise exc.HTTPConflict(explanation=msg) return webob.Response(status_int=202) diff --git a/nova/compute/api.py b/nova/compute/api.py index 02bae7262..7ed41fbbc 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1047,14 +1047,9 @@ class API(base.Base): instance = db.api.instance_get(context, instance_id) name = name or instance["display_name"] - invalid_rebuild_states = [ - vm_states.BUILDING, - vm_states.REBUILDING, - ] - - if instance["vm_state"] in invalid_rebuild_states: - msg = _("Instance already building") - raise exception.BuildInProgress(msg) + if instance["vm_state"] != vm_states.ACTIVE: + msg = _("Instance must be active to rebuild.") + raise exception.RebuildRequiresActiveInstance(msg) files_to_inject = files_to_inject or [] metadata = metadata or {} diff --git a/nova/exception.py b/nova/exception.py index 66740019b..889d36c96 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -61,7 +61,7 @@ class ApiError(Error): super(ApiError, self).__init__(outstr) -class BuildInProgress(Error): +class 
RebuildRequiresActiveInstance(Error): pass -- cgit From 1c7002db8be430cded6efb7378103e17c8df21b4 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 13:24:37 -0400 Subject: Fixed issue where we were setting the state to DELETED before it's actually deleted. --- nova/compute/manager.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 7697e8e6c..924799dc4 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -490,15 +490,15 @@ class ComputeManager(manager.SchedulerDependentManager): def terminate_instance(self, context, instance_id): """Terminate an instance on this host.""" self._shutdown_instance(context, instance_id, 'Terminating') - instance = self.db.instance_get(context.elevated(), instance_id) + self.db.instance_destroy(context, instance_id) + self._instance_update(context, instance_id, vm_state=vm_states.DELETED, task_state=None, terminated_at=utils.utcnow()) - # TODO(ja): should we keep it in a terminated state for a bit? - self.db.instance_destroy(context, instance_id) + instance = self.db.instance_get(context.elevated(), instance_id) usage_info = utils.usage_from_instance(instance) notifier.notify('compute.%s' % self.host, 'compute.instance.delete', -- cgit From a6bf7c0b2522509dda8dd5e537fad49665aa2af2 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 13:27:23 -0400 Subject: Added DELETED status to OSAPI just in case. --- nova/api/openstack/common.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 90b2095b8..07c6fbd11 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -76,6 +76,9 @@ _STATE_MAP = { vm_states.ERROR: { 'default': 'ERROR', }, + vm_states.DELETED: { + 'default': 'DELETED', + }, } -- cgit From 0343a328e66557abda9d0817558ad09a73962eb9 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 24 Aug 2011 14:39:47 -0700 Subject: change snapshot msg too --- nova/api/ec2/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 363dad7cd..1e176e52d 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -405,7 +405,7 @@ class Executor(wsgi.Application): LOG.info(_('SnapshotNotFound raised: %s'), unicode(ex), context=context) ec2_id = ec2utils.id_to_ec2_snap_id(ex.kwargs['snapshot_id']) - message = _('Snapshot %s not found') % ec2_id + message = ex.message % {'snapshot_id': ec2_id} return self._error(req, context, type(ex).__name__, message) except exception.NotFound as ex: LOG.info(_('NotFound raised: %s'), unicode(ex), context=context) -- cgit From 575f72693fa20c7c4157c8ce9702751cd54f1a82 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 18:00:03 -0400 Subject: Fixed silly ordering issue which was causing tons of test failures. 
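The reorder below makes terminate_instance fetch and update the row before instance_destroy() removes it; destroying first leaves nothing to read for the delete notification. A toy sketch of why the order matters, with in-memory stand-ins for the nova db API:

    db = {1: {'id': 1, 'vm_state': 'active'}}

    def instance_get(instance_id):
        return db[instance_id]              # KeyError once destroyed

    def instance_update(instance_id, **values):
        db[instance_id].update(values)

    def instance_destroy(instance_id):
        del db[instance_id]

    instance = instance_get(1)              # 1. read while the row exists
    instance_update(1, vm_state='deleted')  # 2. record the final state
    instance_destroy(1)                     # 3. drop the row
    print(instance)                         # data survives for the notifier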
--- nova/compute/manager.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 924799dc4..c29eef07f 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -490,15 +490,15 @@ class ComputeManager(manager.SchedulerDependentManager): def terminate_instance(self, context, instance_id): """Terminate an instance on this host.""" self._shutdown_instance(context, instance_id, 'Terminating') - self.db.instance_destroy(context, instance_id) - + instance = self.db.instance_get(context.elevated(), instance_id) self._instance_update(context, instance_id, vm_state=vm_states.DELETED, task_state=None, terminated_at=utils.utcnow()) - instance = self.db.instance_get(context.elevated(), instance_id) + self.db.instance_destroy(context, instance_id) + usage_info = utils.usage_from_instance(instance) notifier.notify('compute.%s' % self.host, 'compute.instance.delete', -- cgit From 5f1380bfc69913f6aeb2a64e3501f77973493bc3 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 18:14:13 -0400 Subject: Added fix for parallel build test. --- nova/tests/api/openstack/test_server_actions.py | 2 +- run_tests.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 0cbbe271d..eae74e75e 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -35,7 +35,7 @@ def return_server_with_attributes(**kwargs): return _return_server -def return_server_with_state(vm_state, task_state=None) +def return_server_with_state(vm_state, task_state=None): return return_server_with_attributes(vm_state=vm_state, task_state=task_state) diff --git a/run_tests.py b/run_tests.py index fd836967e..b9a74769e 100644 --- a/run_tests.py +++ b/run_tests.py @@ -55,6 +55,7 @@ To run a single test module: """ +import eventlet import gettext import heapq import os @@ -62,6 +63,7 @@ import unittest import sys import time +eventlet.monkey_patch() gettext.install('nova', unicode=1) from nose import config -- cgit From 6c4329f846685ee54c5265e5cc56e58e6fbd55e9 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 18:25:21 -0400 Subject: stub_instance fix from merge conflict --- nova/tests/api/openstack/test_server_actions.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index eae74e75e..6fb21fad3 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -46,7 +46,8 @@ def return_server_with_uuid_and_state(vm_state, task_state=None): return _return_server -def stub_instance(id, metadata=None, image_ref="10", flavor_id="1", name=None): +def stub_instance(id, metadata=None, image_ref="10", flavor_id="1", + name=None, vm_state=None, task_state=None): if metadata is not None: metadata_items = [{'key':k, 'value':v} for k, v in metadata.items()] else: @@ -67,8 +68,8 @@ def stub_instance(id, metadata=None, image_ref="10", flavor_id="1", name=None): "launch_index": 0, "key_name": "", "key_data": "", - "vm_state": vm_states.ACTIVE, - "task_state": None, + "vm_state": vm_state or vm_states.ACTIVE, + "task_state": task_state, "memory_mb": 0, "vcpus": 0, "local_gb": 0, -- cgit From f0fcc4ba61b4658b1e28bd69cfcf395cc408496a Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 09:04:04 -0400 
Subject: Another attempt at fixing hanging test. --- nova/tests/test_xenapi.py | 2 ++ run_tests.py | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 2f0559366..061e9ffea 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -24,6 +24,8 @@ import re import stubout import ast +eventlet.monkey_patch() + from nova import db from nova import context from nova import flags diff --git a/run_tests.py b/run_tests.py index b9a74769e..fd836967e 100644 --- a/run_tests.py +++ b/run_tests.py @@ -55,7 +55,6 @@ To run a single test module: """ -import eventlet import gettext import heapq import os @@ -63,7 +62,6 @@ import unittest import sys import time -eventlet.monkey_patch() gettext.install('nova', unicode=1) from nose import config -- cgit From 881fb85c9a74fc3436d07d3cf3876c2f815b5618 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 10:59:04 -0400 Subject: Set state to RESIZING during resizing... --- nova/compute/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 960d28bd0..89e44258c 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1165,7 +1165,7 @@ class API(base.Base): self.update(context, instance_id, - vm_state=vm_states.ACTIVE, + vm_state=vm_states.RESIZING, task_state=task_states.RESIZE_PREP) instance_ref = self._get_instance(context, instance_id, 'resize') -- cgit From 6e14007c09a465374d1b50cd00549c1be6dc536c Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 11:11:51 -0400 Subject: Removed RESIZE-CONFIRM hack. --- nova/api/openstack/views/servers.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 8f8d04ca0..b0daeb7a8 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -21,8 +21,6 @@ import hashlib import os from nova import exception -import nova.compute -import nova.context from nova.api.openstack import common from nova.api.openstack.views import addresses as addresses_view from nova.api.openstack.views import flavors as flavors_view @@ -70,12 +68,6 @@ class ViewBuilder(object): 'name': inst['display_name'], 'status': common.status_from_state(vm_state, task_state)} - ctxt = nova.context.get_admin_context() - compute_api = nova.compute.API() - - if compute_api.has_finished_migration(ctxt, inst['uuid']): - inst_dict['status'] = 'RESIZE-CONFIRM' - # Return the metadata as a dictionary metadata = {} for item in inst.get('metadata', []): -- cgit From 0e3986e71f4bbc848e81f18d6c3e6ad33ab3684c Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 11:12:53 -0400 Subject: Removed invalid test. 
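On the hanging-test churn above: eventlet.monkey_patch() only helps if it runs before other modules bind the blocking stdlib primitives, which is why its placement keeps moving between run_tests.py and the test module. A minimal sketch, independent of the nova code:

    import eventlet
    eventlet.monkey_patch()   # must run before anything imports blocking I/O

    import time               # now the cooperative, green version

    def worker(n):
        time.sleep(0.01)      # yields to the hub instead of blocking
        return n

    pool = eventlet.GreenPool()
    print(list(pool.imap(worker, range(5))))   # [0, 1, 2, 3, 4]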
--- nova/tests/api/openstack/test_server_actions.py | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 6fb21fad3..b9ef41465 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -244,19 +244,6 @@ class ServerActionsTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 500) - def test_resized_server_has_correct_status(self): - req = self.webreq('/1', 'GET') - - def fake_migration_get(*args): - return {} - - self.stubs.Set(nova.db, 'migration_get_by_instance_and_status', - fake_migration_get) - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - body = json.loads(res.body) - self.assertEqual(body['server']['status'], 'RESIZE-CONFIRM') - def test_confirm_resize_server(self): req = self.webreq('/1/action', 'POST', dict(confirmResize=None)) -- cgit From 423a29ff347d3911ba1a98aa224e2a29bdbb8d4c Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 11:17:31 -0400 Subject: Set error state when migration prep fails. --- nova/compute/manager.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index c29eef07f..de43a5ced 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -929,8 +929,11 @@ class ComputeManager(manager.SchedulerDependentManager): instance_ref = self.db.instance_get_by_uuid(context, instance_id) if instance_ref['host'] == FLAGS.host: - raise exception.Error(_( - 'Migration error: destination same as source!')) + self._instance_update(context, + instance_id, + vm_state=vm_states.ERROR) + msg = _('Migration error: destination same as source!') + raise exception.Error(msg) old_instance_type = self.db.instance_type_get(context, instance_ref['instance_type_id']) -- cgit From a14e2b1d8cbc87d5bcb31b9127035160fde4acc5 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 11:44:28 -0400 Subject: Verify resize needs to be set. 
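The prep_resize change above persists an ERROR vm_state before raising, so a failed migration cannot leave the instance advertised as healthy. A sketch of the fail-fast pattern, with stand-ins for the db update and nova's exception type:

    def instance_update(instance_id, **values):
        print('instance %s <- %s' % (instance_id, values))

    def prep_resize(instance_id, source_host, dest_host):
        if dest_host == source_host:
            instance_update(instance_id, vm_state='error')   # persist first
            raise RuntimeError('Migration error: '
                               'destination same as source!')

    try:
        prep_resize(1, 'compute1', 'compute1')
    except RuntimeError as exc:
        print(exc)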
--- nova/compute/api.py | 8 ++++---- nova/compute/manager.py | 5 +++++ 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 89e44258c..47ad04930 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1089,8 +1089,8 @@ class API(base.Base): self.update(context, instance_id, - vm_state=vm_states.VERIFY_RESIZE, - task_state=task_states.RESIZE_REVERTING) + vm_state=vm_states.ACTIVE, + task_state=None) params = {'migration_id': migration_ref['id']} self._cast_compute_message('revert_resize', context, @@ -1115,8 +1115,8 @@ class API(base.Base): self.update(context, instance_id, - vm_state=vm_states.VERIFY_RESIZE, - task_state=task_states.RESIZE_CONFIRMING) + vm_state=vm_states.ACTIVE, + task_state=None) params = {'migration_id': migration_ref['id']} self._cast_compute_message('confirm_resize', context, diff --git a/nova/compute/manager.py b/nova/compute/manager.py index de43a5ced..5427b896e 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1027,6 +1027,11 @@ class ComputeManager(manager.SchedulerDependentManager): self.driver.finish_migration(context, instance_ref, disk_info, network_info, resize_instance) + self._instance_update(context, + instance_id, + vm_state=vm_states.VERIFY_RESIZE, + task_state=None) + self.db.migration_update(context, migration_id, {'status': 'finished', }) -- cgit From 6758779249d490fd21bfdeae6d40adfc33d8cd17 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 12:50:36 -0400 Subject: Reverted two mistakes when looking over full diff. --- nova/api/ec2/cloud.py | 10 ++++------ nova/compute/manager.py | 2 -- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index c5a360426..cf9437b08 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -104,12 +104,10 @@ def state_description_from_vm_state(vm_state): # TODO(yamahata): hypervisor dependent default device name _DEFAULT_ROOT_DEVICE_NAME = '/dev/sda1' -_DEFAULT_MAPPINGS = { - 'ami': 'sda1', - 'ephemeral0': 'sda2', - 'root': _DEFAULT_ROOT_DEVICE_NAME, - 'swap': 'sda3', -} +_DEFAULT_MAPPINGS = {'ami': 'sda1', + 'ephemeral0': 'sda2', + 'root': _DEFAULT_ROOT_DEVICE_NAME, + 'swap': 'sda3'} def _parse_block_device_mapping(bdm): diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 5427b896e..a3ced1279 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -547,8 +547,6 @@ class ComputeManager(manager.SchedulerDependentManager): vm_state=vm_states.REBUILDING, task_state=task_states.BLOCK_DEVICE_MAPPING) - bd_mapping = self._setup_block_device_mapping(context, instance_id) - image_ref = kwargs.get('image_ref') instance_ref.image_ref = image_ref instance_ref.injected_files = kwargs.get('injected_files', []) -- cgit From d8d4aff908925b2f351e77291f4a8f394994063d Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 16:38:38 -0400 Subject: Review feedback. 
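Taken together, the resize patches above leave the lifecycle sketched below. The checkpoints are assembled from the state updates visible in these diffs, so treat the exact ordering of the intermediate task states as a reading of the series rather than a spec; a later patch in the series also reworks VERIFY_RESIZE from a vm_state into ACTIVE plus a resize_verify task state:

    RESIZE_FLOW = [
        ('resizing', 'resize_prep'),       # API accepts the resize
        ('resizing', 'resize_migrating'),  # source host moves the disk
        ('resizing', 'resize_migrated'),
        ('resizing', 'resize_finish'),     # destination host builds the VM
        ('verify_resize', None),           # awaiting confirm or revert
        ('active', None),                  # confirm_resize / revert_resize
    ]

    for vm_state, task_state in RESIZE_FLOW:
        print('%-14s %s' % (vm_state, task_state))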
--- nova/api/ec2/cloud.py | 8 +++++--- nova/api/openstack/common.py | 7 ++++--- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index cf9437b08..ac247a0ef 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -79,7 +79,9 @@ def _gen_key(context, user_id, key_name): return {'private_key': private_key, 'fingerprint': fingerprint} -# EC2 API: Valid Values: +# EC2 API can return the following values as documented in the EC2 API +# http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ +# ApiReference-ItemType-InstanceStateType.html # pending | running | shutting-down | terminated | stopping | stopped _STATE_DESCRIPTION_MAP = { None: 'pending', @@ -1065,8 +1067,8 @@ class CloudController(object): result): vm_state = instance['vm_state'] state_to_value = { - vm_states.STOPPED: 'stop', - vm_states.DELETED: 'terminate', + vm_states.STOPPED: 'stopped', + vm_states.DELETED: 'terminated', } value = state_to_value.get(vm_state) if value: diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 07c6fbd11..bdbae0271 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -84,10 +84,11 @@ _STATE_MAP = { def status_from_state(vm_state, task_state='default'): """Given vm_state and task_state, return a status string.""" - LOG.debug("Generating status for vm_state=%(vm_state)s " - "task_state=%(task_state)s." % locals()) task_map = _STATE_MAP.get(vm_state, dict(default='UNKNOWN_STATE')) - return task_map.get(task_state, task_map['default']) + status = task_map.get(task_state, task_map['default']) + LOG.debug("Generated %(status)s from vm_state=%(vm_state)s " + "task_state=%(task_state)s." % locals()) + return status def vm_state_from_status(status): -- cgit From ae1ac682673648f2a2f364eabd525985f3d16a9d Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 16:40:15 -0400 Subject: Bumped migration number. --- .../versions/042_update_instance_states.py | 141 --------------------- .../versions/043_update_instance_states.py | 141 +++++++++++++++++++++ 2 files changed, 141 insertions(+), 141 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py deleted file mode 100644 index 65bdf601d..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py +++ /dev/null @@ -1,141 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import sqlalchemy -from sqlalchemy import MetaData, Table, Column, String - -from nova.compute import task_states -from nova.compute import vm_states - - -meta = MetaData() - - -c_task_state = Column('task_state', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - - -_upgrade_translations = { - "stopping": { - "vm_state": vm_states.ACTIVE, - "task_state": task_states.STOPPING, - }, - "stopped": { - "vm_state": vm_states.STOPPED, - "task_state": None, - }, - "terminated": { - "vm_state": vm_states.DELETED, - "task_state": None, - }, - "terminating": { - "vm_state": vm_states.ACTIVE, - "task_state": task_states.DELETING, - }, - "running": { - "vm_state": vm_states.ACTIVE, - "task_state": None, - }, - "scheduling": { - "vm_state": vm_states.BUILDING, - "task_state": task_states.SCHEDULING, - }, - "migrating": { - "vm_state": vm_states.MIGRATING, - "task_state": None, - }, - "pending": { - "vm_state": vm_states.BUILDING, - "task_state": task_states.SCHEDULING, - }, -} - - -_downgrade_translations = { - vm_states.ACTIVE: { - None: "running", - task_states.DELETING: "terminating", - task_states.STOPPING: "stopping", - }, - vm_states.BUILDING: { - None: "pending", - task_states.SCHEDULING: "scheduling", - }, - vm_states.STOPPED: { - None: "stopped", - }, - vm_states.REBUILDING: { - None: "pending", - }, - vm_states.DELETED: { - None: "terminated", - }, - vm_states.MIGRATING: { - None: "migrating", - }, -} - - -def upgrade(migrate_engine): - meta.bind = migrate_engine - - instance_table = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - c_state = instance_table.c.state - c_state.alter(name='power_state') - - c_vm_state = instance_table.c.state_description - c_vm_state.alter(name='vm_state') - - instance_table.create_column(c_task_state) - - instance_table = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - for old_state, values in _upgrade_translations.iteritems(): - instance_table.update().\ - values(**values).\ - where(c_vm_state == old_state).\ - execute() - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - - instance_table = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - c_task_state = instance_table.c.task_state - - c_state = instance_table.c.power_state - c_state.alter(name='state') - - c_vm_state = instance_table.c.vm_state - c_vm_state.alter(name='state_description') - - for old_vm_state, old_task_states in _downgrade_translations.iteritems(): - for old_task_state, new_state_desc in old_task_states.iteritems(): - instance_table.update().\ - where(c_task_state == old_task_state).\ - where(c_vm_state == old_vm_state).\ - values(state_description=new_state_desc).\ - execute() - - instance_table.drop_column('task_state') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py new file mode 100644 index 000000000..65bdf601d --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py @@ -0,0 +1,141 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sqlalchemy +from sqlalchemy import MetaData, Table, Column, String + +from nova.compute import task_states +from nova.compute import vm_states + + +meta = MetaData() + + +c_task_state = Column('task_state', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + +_upgrade_translations = { + "stopping": { + "vm_state": vm_states.ACTIVE, + "task_state": task_states.STOPPING, + }, + "stopped": { + "vm_state": vm_states.STOPPED, + "task_state": None, + }, + "terminated": { + "vm_state": vm_states.DELETED, + "task_state": None, + }, + "terminating": { + "vm_state": vm_states.ACTIVE, + "task_state": task_states.DELETING, + }, + "running": { + "vm_state": vm_states.ACTIVE, + "task_state": None, + }, + "scheduling": { + "vm_state": vm_states.BUILDING, + "task_state": task_states.SCHEDULING, + }, + "migrating": { + "vm_state": vm_states.MIGRATING, + "task_state": None, + }, + "pending": { + "vm_state": vm_states.BUILDING, + "task_state": task_states.SCHEDULING, + }, +} + + +_downgrade_translations = { + vm_states.ACTIVE: { + None: "running", + task_states.DELETING: "terminating", + task_states.STOPPING: "stopping", + }, + vm_states.BUILDING: { + None: "pending", + task_states.SCHEDULING: "scheduling", + }, + vm_states.STOPPED: { + None: "stopped", + }, + vm_states.REBUILDING: { + None: "pending", + }, + vm_states.DELETED: { + None: "terminated", + }, + vm_states.MIGRATING: { + None: "migrating", + }, +} + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + instance_table = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_state = instance_table.c.state + c_state.alter(name='power_state') + + c_vm_state = instance_table.c.state_description + c_vm_state.alter(name='vm_state') + + instance_table.create_column(c_task_state) + + instance_table = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + for old_state, values in _upgrade_translations.iteritems(): + instance_table.update().\ + values(**values).\ + where(c_vm_state == old_state).\ + execute() + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + instance_table = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_task_state = instance_table.c.task_state + + c_state = instance_table.c.power_state + c_state.alter(name='state') + + c_vm_state = instance_table.c.vm_state + c_vm_state.alter(name='state_description') + + for old_vm_state, old_task_states in _downgrade_translations.iteritems(): + for old_task_state, new_state_desc in old_task_states.iteritems(): + instance_table.update().\ + where(c_task_state == old_task_state).\ + where(c_vm_state == old_vm_state).\ + values(state_description=new_state_desc).\ + execute() + + instance_table.drop_column('task_state') -- cgit From c316782f8879ef321c4545b04bc9d24e11bb4ee6 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 17:27:10 -0400 Subject: review feedback --- nova/api/ec2/cloud.py | 3 +-- nova/api/openstack/common.py | 7 ++----- nova/compute/manager.py | 12 ++++++------ 
nova/compute/task_states.py | 21 +++++++++++++++------ nova/compute/vm_states.py | 9 +++++++-- nova/db/sqlalchemy/api.py | 40 ---------------------------------------- 6 files changed, 31 insertions(+), 61 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index ac247a0ef..fe44191c8 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -92,7 +92,6 @@ _STATE_DESCRIPTION_MAP = { vm_states.STOPPED: 'stopped', vm_states.MIGRATING: 'migrate', vm_states.RESIZING: 'resize', - vm_states.VERIFY_RESIZE: 'verify_resize', vm_states.PAUSED: 'pause', vm_states.SUSPENDED: 'suspend', vm_states.RESCUED: 'rescue', @@ -101,7 +100,7 @@ _STATE_DESCRIPTION_MAP = { def state_description_from_vm_state(vm_state): """Map the vm state to the server status string""" - return _STATE_DESCRIPTION_MAP[vm_state] + return _STATE_DESCRIPTION_MAP.get(vm_state, vm_state) # TODO(yamahata): hypervisor dependent default device name diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index bdbae0271..d743a66ef 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -43,8 +43,8 @@ _STATE_MAP = { vm_states.ACTIVE: { 'default': 'ACTIVE', task_states.REBOOTING: 'REBOOT', - task_states.HARD_REBOOTING: 'HARD_REBOOT', - task_states.PASSWORD: 'PASSWORD', + task_states.UPDATING_PASSWORD: 'PASSWORD', + task_states.RESIZE_VERIFY: 'VERIFY_RESIZE', }, vm_states.BUILDING: { 'default': 'BUILD', @@ -61,9 +61,6 @@ _STATE_MAP = { vm_states.RESIZING: { 'default': 'RESIZE', }, - vm_states.VERIFY_RESIZE: { - 'default': 'VERIFY_RESIZE', - }, vm_states.PAUSED: { 'default': 'PAUSED', }, diff --git a/nova/compute/manager.py b/nova/compute/manager.py index a3ced1279..b4c6abae0 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -415,7 +415,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.BUILDING, - task_state=task_states.SPAWN) + task_state=task_states.SPAWNING) # TODO(vish) check to make sure the availability zone matches try: @@ -557,7 +557,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.REBUILDING, - task_state=task_states.SPAWN) + task_state=task_states.SPAWNING) # pull in new password here since the original password isn't in the db instance_ref.admin_pass = kwargs.get('new_pass', @@ -629,9 +629,9 @@ class ComputeManager(manager.SchedulerDependentManager): None if rotation shouldn't be used (as in the case of snapshots) """ if image_type == "snapshot": - task_state = task_states.SNAPSHOTTING + task_state = task_states.IMAGE_SNAPSHOT elif image_type == "backup": - task_state = task_states.BACKING_UP + task_state = task_states.IMAGE_BACKUP else: raise Exception(_('Image type not recognized %s') % image_type) @@ -1027,8 +1027,8 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - vm_state=vm_states.VERIFY_RESIZE, - task_state=None) + vm_state=vm_states.ACTIVE, + task_state=task_states.RESIZE_VERIFY) self.db.migration_update(context, migration_id, {'status': 'finished', }) diff --git a/nova/compute/task_states.py b/nova/compute/task_states.py index 5f78495ea..e3315a542 100644 --- a/nova/compute/task_states.py +++ b/nova/compute/task_states.py @@ -15,16 +15,25 @@ # License for the specific language governing permissions and limitations # under the License. -"""Possible task states for instances""" +"""Possible task states for instances. 
+ +Compute instance task states represent what is happening to the instance at the +current moment. These tasks can be generic, such as 'spawning', or specific, +such as 'block_device_mapping'. These task states allow for a better view into +what an instance is doing and should be displayed to users/administrators as +necessary. + +""" SCHEDULING = 'scheduling' BLOCK_DEVICE_MAPPING = 'block_device_mapping' NETWORKING = 'networking' -SPAWN = 'spawn' +SPAWNING = 'spawning' + +IMAGE_SNAPSHOT = 'image_snapshot' +IMAGE_BACKUP = 'image_backup' -SNAPSHOTTING = 'snapshotting' -BACKING_UP = 'backing_up' -PASSWORD = 'password' +UPDATING_PASSWORD = 'updating_password' RESIZE_PREP = 'resize_prep' RESIZE_MIGRATING = 'resize_migrating' @@ -32,11 +41,11 @@ RESIZE_MIGRATED = 'resize_migrated' RESIZE_FINISH = 'resize_finish' RESIZE_REVERTING = 'resize_reverting' RESIZE_CONFIRMING = 'resize_confirming' +RESIZE_VERIFY = 'resize_verify' REBUILDING = 'rebuilding' REBOOTING = 'rebooting' -HARD_REBOOTING = 'hard_rebooting' PAUSING = 'pausing' UNPAUSING = 'unpausing' SUSPENDING = 'suspending' diff --git a/nova/compute/vm_states.py b/nova/compute/vm_states.py index 560e6d688..6f16c1f09 100644 --- a/nova/compute/vm_states.py +++ b/nova/compute/vm_states.py @@ -15,7 +15,13 @@ # License for the specific language governing permissions and limitations # under the License. -"""Possible vm states for instances""" +"""Possible vm states for instances. + +Compute instance vm states represent the state of an instance as it pertains to +a user or administrator. When combined with task states (task_states.py), a +better picture can be formed regarding the instance's health. + +""" ACTIVE = 'active' BUILDING = 'building' @@ -29,6 +35,5 @@ STOPPED = 'stopped' MIGRATING = 'migrating' RESIZING = 'resizing' -VERIFY_RESIZE = 'verify_resize' ERROR = 'error' diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 08fc81759..7b78e286d 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1484,46 +1484,6 @@ def instance_get_floating_address(context, instance_id): return fixed_ip_refs[0].floating_ips[0]['address'] -@require_admin_context -def instance_set_power_state(context, instance_id, power_state): - session = get_session() - partial = session.query(models.Instance) - - if utils.is_uuid_like(instance_id): - result = partial.filter_by(uuid=instance_id) - else: - result = partial.filter_by(id=instance_id) - - result.update({'power_state': power_state}) - - -@require_admin_context -def instance_set_vm_state(context, instance_id, vm_state): - # vm_state = running, halted, suspended, paused - session = get_session() - partial = session.query(models.Instance) - - if utils.is_uuid_like(instance_id): - result = partial.filter_by(uuid=instance_id) - else: - result = partial.filter_by(id=instance_id) - - result.update({'vm_state': vm_state}) - - -def instance_set_task_state(context, instance_id, task_state): - # task_state = running, halted, suspended, paused - session = get_session() - partial = session.query(models.Instance) - - if utils.is_uuid_like(instance_id): - result = partial.filter_by(uuid=instance_id) - else: - result = partial.filter_by(id=instance_id) - - result.update({'task_state': task_state}) - - @require_context def instance_update(context, instance_id, values): session = get_session() -- cgit From 2cf0b67e08e1608bd717ffadd41d5029db2b4a3a Mon Sep 17 00:00:00 2001 From: "Kevin L. 
Mitchell" Date: Thu, 25 Aug 2011 21:56:45 +0000 Subject: Fix glance image authorization check now that glance can do authorization checks on its own; use correct image service when looking for ramdisk, etc.; fix a couple of PEP8 errors --- nova/api/openstack/create_instance_helper.py | 6 +++--- nova/image/glance.py | 14 ++++++++++++++ nova/ipv6/account_identifier.py | 3 ++- nova/tests/test_ipv6.py | 2 +- 4 files changed, 20 insertions(+), 5 deletions(-) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 483ff4985..c428a8209 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -98,7 +98,7 @@ class CreateInstanceHelper(object): try: image_service, image_id = nova.image.get_image_service(image_href) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( - req, image_id) + req, image_service, image_id) images = set([str(x['id']) for x in image_service.index(context)]) assert str(image_id) in images except Exception, e: @@ -248,12 +248,12 @@ class CreateInstanceHelper(object): msg = _("Server name is an empty string") raise exc.HTTPBadRequest(explanation=msg) - def _get_kernel_ramdisk_from_image(self, req, image_id): + def _get_kernel_ramdisk_from_image(self, req, image_service, image_id): """Fetch an image from the ImageService, then if present, return the associated kernel and ramdisk image IDs. """ context = req.environ['nova.context'] - image_meta = self._image_service.show(context, image_id) + image_meta = image_service.show(context, image_id) # NOTE(sirp): extracted to a separate method to aid unit-testing, the # new method doesn't need a request obj or an ImageService stub kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image( diff --git a/nova/image/glance.py b/nova/image/glance.py index 9060f6a91..16f803218 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -269,6 +269,20 @@ class GlanceImageService(service.BaseImageService): image_meta = _convert_from_string(image_meta) return image_meta + @staticmethod + def _is_image_available(context, image_meta): + """Check image availability. + + Under Glance, images are always available if the context has + an auth_token. Otherwise, we fall back to the superclass + method. 
+ + """ + if hasattr(context, 'auth_token') and context.auth_token: + return True + return service.BaseImageService._is_image_available(context, + image_meta) + # utility functions def _convert_timestamps_to_datetimes(image_meta): diff --git a/nova/ipv6/account_identifier.py b/nova/ipv6/account_identifier.py index 27bb01988..8a08510ac 100644 --- a/nova/ipv6/account_identifier.py +++ b/nova/ipv6/account_identifier.py @@ -39,7 +39,8 @@ def to_global(prefix, mac, project_id): except TypeError: raise TypeError(_('Bad prefix for to_global_ipv6: %s') % prefix) except NameError: - raise TypeError(_('Bad project_id for to_global_ipv6: %s') % project_id) + raise TypeError(_('Bad project_id for to_global_ipv6: %s') % + project_id) def to_mac(ipv6_address): diff --git a/nova/tests/test_ipv6.py b/nova/tests/test_ipv6.py index 04c1b5598..e1ba4aafb 100644 --- a/nova/tests/test_ipv6.py +++ b/nova/tests/test_ipv6.py @@ -48,7 +48,7 @@ class IPv6RFC2462TestCase(test.TestCase): def test_to_global_with_bad_prefix(self): bad_prefix = '82' self.assertRaises(TypeError, ipv6.to_global, - bad_prefix, + bad_prefix, '2001:db8::216:3eff:fe33:4455', 'test') -- cgit From 63b26178407423524390b2a47425b6953c910e00 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 18:00:32 -0400 Subject: Test fixup after last review feedback commit. --- nova/tests/api/openstack/test_servers.py | 11 ++++------- nova/tests/test_cloud.py | 2 +- nova/tests/test_xenapi.py | 2 -- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 3905e4f7a..f75263c45 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -2488,11 +2488,6 @@ class TestServerStatus(test.TestCase): task_states.REBOOTING) self.assertEqual(response['server']['status'], 'REBOOT') - def test_hard_reboot(self): - response = self._get_with_state(vm_states.ACTIVE, - task_states.HARD_REBOOTING) - self.assertEqual(response['server']['status'], 'HARD_REBOOT') - def test_rebuild(self): response = self._get_with_state(vm_states.REBUILDING) self.assertEqual(response['server']['status'], 'REBUILD') @@ -2506,11 +2501,13 @@ class TestServerStatus(test.TestCase): self.assertEqual(response['server']['status'], 'RESIZE') def test_verify_resize(self): - response = self._get_with_state(vm_states.VERIFY_RESIZE) + response = self._get_with_state(vm_states.ACTIVE, + task_states.RESIZE_VERIFY) self.assertEqual(response['server']['status'], 'VERIFY_RESIZE') def test_password_update(self): - response = self._get_with_state(vm_states.ACTIVE, task_states.PASSWORD) + response = self._get_with_state(vm_states.ACTIVE, + task_states.UPDATING_PASSWORD) self.assertEqual(response['server']['status'], 'PASSWORD') def test_stopped(self): diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 9d58b7341..9deb5c011 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -1611,7 +1611,7 @@ class CloudTestCase(test.TestCase): self.assertEqual(groupSet, expected_groupSet) self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'), {'instance_id': 'i-12345678', - 'instanceInitiatedShutdownBehavior': 'stop'}) + 'instanceInitiatedShutdownBehavior': 'stopped'}) self.assertEqual(get_attribute('instanceType'), {'instance_id': 'i-12345678', 'instanceType': 'fake_type'}) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 061e9ffea..2f0559366 100644 --- a/nova/tests/test_xenapi.py +++ 
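The status_from_state() rewrite in the review-feedback patch earlier in the series is a two-level lookup: vm_state picks a map, task_state picks an override, and 'default' fills the gaps. A runnable sketch with the map trimmed to a few entries taken from the diffs:

    _STATE_MAP = {
        'active': {'default': 'ACTIVE',
                   'rebooting': 'REBOOT',
                   'updating_password': 'PASSWORD',
                   'resize_verify': 'VERIFY_RESIZE'},
        'building': {'default': 'BUILD'},
    }

    def status_from_state(vm_state, task_state='default'):
        task_map = _STATE_MAP.get(vm_state, {'default': 'UNKNOWN_STATE'})
        return task_map.get(task_state, task_map['default'])

    print(status_from_state('active'))                   # ACTIVE
    print(status_from_state('active', 'resize_verify'))  # VERIFY_RESIZE
    print(status_from_state('unknown-vm-state'))         # UNKNOWN_STATE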
b/nova/tests/test_xenapi.py @@ -24,8 +24,6 @@ import re import stubout import ast -eventlet.monkey_patch() - from nova import db from nova import context from nova import flags -- cgit From b846d22937ac62549832e16105ed06a21a3e34d0 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Fri, 26 Aug 2011 15:36:33 -0400 Subject: Tiny tweaks to the migration script. --- .../versions/043_update_instance_states.py | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py index 65bdf601d..e58ae5362 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py @@ -33,35 +33,35 @@ c_task_state = Column('task_state', _upgrade_translations = { "stopping": { - "vm_state": vm_states.ACTIVE, + "state_description": vm_states.ACTIVE, "task_state": task_states.STOPPING, }, "stopped": { - "vm_state": vm_states.STOPPED, + "state_description": vm_states.STOPPED, "task_state": None, }, "terminated": { - "vm_state": vm_states.DELETED, + "state_description": vm_states.DELETED, "task_state": None, }, "terminating": { - "vm_state": vm_states.ACTIVE, + "state_description": vm_states.ACTIVE, "task_state": task_states.DELETING, }, "running": { - "vm_state": vm_states.ACTIVE, + "state_description": vm_states.ACTIVE, "task_state": None, }, "scheduling": { - "vm_state": vm_states.BUILDING, + "state_description": vm_states.BUILDING, "task_state": task_states.SCHEDULING, }, "migrating": { - "vm_state": vm_states.MIGRATING, + "state_description": vm_states.MIGRATING, "task_state": None, }, "pending": { - "vm_state": vm_states.BUILDING, + "state_description": vm_states.BUILDING, "task_state": task_states.SCHEDULING, }, } @@ -106,9 +106,6 @@ def upgrade(migrate_engine): instance_table.create_column(c_task_state) - instance_table = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - for old_state, values in _upgrade_translations.iteritems(): instance_table.update().\ values(**values).\ @@ -135,7 +132,7 @@ def downgrade(migrate_engine): instance_table.update().\ where(c_task_state == old_task_state).\ where(c_vm_state == old_vm_state).\ - values(state_description=new_state_desc).\ + values(vm_state=new_state_desc).\ execute() instance_table.drop_column('task_state') -- cgit From 8b44cedcc099542e6485a33764cece4c141fd4ab Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 26 Aug 2011 15:40:04 -0700 Subject: start of kombu implementation, keeping the same RPC interfaces --- nova/rpc/__init__.py | 25 +- nova/rpc/amqp.py | 593 ---------------------------------------------- nova/rpc/impl_carrot.py | 607 ++++++++++++++++++++++++++++++++++++++++++++++++ nova/rpc/impl_kombu.py | 426 +++++++++++++++++++++++++++++++++ 4 files changed, 1045 insertions(+), 606 deletions(-) delete mode 100644 nova/rpc/amqp.py create mode 100644 nova/rpc/impl_carrot.py create mode 100644 nova/rpc/impl_kombu.py diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index bdf7f705b..f102cf0fa 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -23,10 +23,18 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('rpc_backend', - 'nova.rpc.amqp', - "The messaging module to use, defaults to AMQP.") + 'carrot', + "The messaging module to use, defaults to carrot.") -RPCIMPL = import_object(FLAGS.rpc_backend) +impl_table = {'kombu': 
'nova.rpc.impl_kombu', + 'amqp': 'nova.rpc.impl_kombu'} + 'carrot': 'nova.rpc.impl_carrot'} + + +# rpc_backend can be a short name like 'kombu', or it can be the full +# module name +RPCIMPL = import_object(impl_table.get(FLAGS.rpc_backend, + FLAGS.rpc_backend)) def create_connection(new=True): @@ -34,16 +42,7 @@ def create_connection(new=True): def create_consumer(conn, topic, proxy, fanout=False): - if fanout: - return RPCIMPL.FanoutAdapterConsumer( - connection=conn, - topic=topic, - proxy=proxy) - else: - return RPCIMPL.TopicAdapterConsumer( - connection=conn, - topic=topic, - proxy=proxy) + return RPCIMPL.create_consumer(conn, topic, proxy, fanout) def create_consumer_set(conn, consumers): diff --git a/nova/rpc/amqp.py b/nova/rpc/amqp.py deleted file mode 100644 index fe429b266..000000000 --- a/nova/rpc/amqp.py +++ /dev/null @@ -1,593 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""AMQP-based RPC. - -Queues have consumers and publishers. - -No fan-out support yet. - -""" - -import json -import sys -import time -import traceback -import types -import uuid - -from carrot import connection as carrot_connection -from carrot import messaging -from eventlet import greenpool -from eventlet import pools -from eventlet import queue -import greenlet - -from nova import context -from nova import exception -from nova import fakerabbit -from nova import flags -from nova import log as logging -from nova import utils -from nova.rpc.common import RemoteError, LOG - - -FLAGS = flags.FLAGS -flags.DEFINE_integer('rpc_thread_pool_size', 1024, - 'Size of RPC thread pool') -flags.DEFINE_integer('rpc_conn_pool_size', 30, - 'Size of RPC connection pool') - - -class Connection(carrot_connection.BrokerConnection): - """Connection instance object.""" - - @classmethod - def instance(cls, new=True): - """Returns the instance.""" - if new or not hasattr(cls, '_instance'): - params = dict(hostname=FLAGS.rabbit_host, - port=FLAGS.rabbit_port, - ssl=FLAGS.rabbit_use_ssl, - userid=FLAGS.rabbit_userid, - password=FLAGS.rabbit_password, - virtual_host=FLAGS.rabbit_virtual_host) - - if FLAGS.fake_rabbit: - params['backend_cls'] = fakerabbit.Backend - - # NOTE(vish): magic is fun! - # pylint: disable=W0142 - if new: - return cls(**params) - else: - cls._instance = cls(**params) - return cls._instance - - @classmethod - def recreate(cls): - """Recreates the connection instance. - - This is necessary to recover from some network errors/disconnects. - - """ - try: - del cls._instance - except AttributeError, e: - # The _instance stuff is for testing purposes. Usually we don't use - # it. So don't freak out if it doesn't exist. 
- pass - return cls.instance() - - -class Pool(pools.Pool): - """Class that implements a Pool of Connections.""" - - # TODO(comstud): Timeout connections not used in a while - def create(self): - LOG.debug('Creating new connection') - return Connection.instance(new=True) - -# Create a ConnectionPool to use for RPC calls. We'll order the -# pool as a stack (LIFO), so that we can potentially loop through and -# timeout old unused connections at some point -ConnectionPool = Pool( - max_size=FLAGS.rpc_conn_pool_size, - order_as_stack=True) - - -class Consumer(messaging.Consumer): - """Consumer base class. - - Contains methods for connecting the fetch method to async loops. - - """ - - def __init__(self, *args, **kwargs): - for i in xrange(FLAGS.rabbit_max_retries): - if i > 0: - time.sleep(FLAGS.rabbit_retry_interval) - try: - super(Consumer, self).__init__(*args, **kwargs) - self.failed_connection = False - break - except Exception as e: # Catching all because carrot sucks - fl_host = FLAGS.rabbit_host - fl_port = FLAGS.rabbit_port - fl_intv = FLAGS.rabbit_retry_interval - LOG.error(_('AMQP server on %(fl_host)s:%(fl_port)d is' - ' unreachable: %(e)s. Trying again in %(fl_intv)d' - ' seconds.') % locals()) - self.failed_connection = True - if self.failed_connection: - LOG.error(_('Unable to connect to AMQP server ' - 'after %d tries. Shutting down.'), - FLAGS.rabbit_max_retries) - sys.exit(1) - - def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): - """Wraps the parent fetch with some logic for failed connection.""" - # TODO(vish): the logic for failed connections and logging should be - # refactored into some sort of connection manager object - try: - if self.failed_connection: - # NOTE(vish): connection is defined in the parent class, we can - # recreate it as long as we create the backend too - # pylint: disable=W0201 - self.connection = Connection.recreate() - self.backend = self.connection.create_backend() - self.declare() - return super(Consumer, self).fetch(no_ack, - auto_ack, - enable_callbacks) - if self.failed_connection: - LOG.error(_('Reconnected to queue')) - self.failed_connection = False - # NOTE(vish): This is catching all errors because we really don't - # want exceptions to be logged 10 times a second if some - # persistent failure occurs. - except Exception, e: # pylint: disable=W0703 - if not self.failed_connection: - LOG.exception(_('Failed to fetch message from queue: %s' % e)) - self.failed_connection = True - - def attach_to_eventlet(self): - """Only needed for unit tests!""" - timer = utils.LoopingCall(self.fetch, enable_callbacks=True) - timer.start(0.1) - return timer - - -class AdapterConsumer(Consumer): - """Calls methods on a proxy object based on method and args.""" - - def __init__(self, connection=None, topic='broadcast', proxy=None): - LOG.debug(_('Initing the Adapter Consumer for %s') % topic) - self.proxy = proxy - self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size) - super(AdapterConsumer, self).__init__(connection=connection, - topic=topic) - self.register_callback(self.process_data) - - def process_data(self, message_data, message): - """Consumer callback to call a method on a proxy object. - - Parses the message for validity and fires off a thread to call the - proxy object method. 
- - Message data should be a dictionary with two keys: - method: string representing the method to call - args: dictionary of arg: value - - Example: {'method': 'echo', 'args': {'value': 42}} - - """ - LOG.debug(_('received %s') % message_data) - # This will be popped off in _unpack_context - msg_id = message_data.get('_msg_id', None) - ctxt = _unpack_context(message_data) - - method = message_data.get('method') - args = message_data.get('args', {}) - message.ack() - if not method: - # NOTE(vish): we may not want to ack here, but that means that bad - # messages stay in the queue indefinitely, so for now - # we just log the message and send an error string - # back to the caller - LOG.warn(_('no method for message: %s') % message_data) - if msg_id: - msg_reply(msg_id, - _('No method for message: %s') % message_data) - return - self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args) - - @exception.wrap_exception() - def _process_data(self, msg_id, ctxt, method, args): - """Thread that maigcally looks for a method on the proxy - object and calls it. - """ - - node_func = getattr(self.proxy, str(method)) - node_args = dict((str(k), v) for k, v in args.iteritems()) - # NOTE(vish): magic is fun! - try: - rval = node_func(context=ctxt, **node_args) - if msg_id: - # Check if the result was a generator - if isinstance(rval, types.GeneratorType): - for x in rval: - msg_reply(msg_id, x, None) - else: - msg_reply(msg_id, rval, None) - - # This final None tells multicall that it is done. - msg_reply(msg_id, None, None) - elif isinstance(rval, types.GeneratorType): - # NOTE(vish): this iterates through the generator - list(rval) - except Exception as e: - logging.exception('Exception during message handling') - if msg_id: - msg_reply(msg_id, None, sys.exc_info()) - return - - -class TopicAdapterConsumer(AdapterConsumer): - """Consumes messages on a specific topic.""" - - exchange_type = 'topic' - - def __init__(self, connection=None, topic='broadcast', proxy=None): - self.queue = topic - self.routing_key = topic - self.exchange = FLAGS.control_exchange - self.durable = FLAGS.rabbit_durable_queues - super(TopicAdapterConsumer, self).__init__(connection=connection, - topic=topic, proxy=proxy) - - -class FanoutAdapterConsumer(AdapterConsumer): - """Consumes messages from a fanout exchange.""" - - exchange_type = 'fanout' - - def __init__(self, connection=None, topic='broadcast', proxy=None): - self.exchange = '%s_fanout' % topic - self.routing_key = topic - unique = uuid.uuid4().hex - self.queue = '%s_fanout_%s' % (topic, unique) - self.durable = False - # Fanout creates unique queue names, so we should auto-remove - # them when done, so they're not left around on restart. - # Also, we're the only one that should be consuming. exclusive - # implies auto_delete, so we'll just set that.. 
- self.exclusive = True - LOG.info(_('Created "%(exchange)s" fanout exchange ' - 'with "%(key)s" routing key'), - dict(exchange=self.exchange, key=self.routing_key)) - super(FanoutAdapterConsumer, self).__init__(connection=connection, - topic=topic, proxy=proxy) - - -class ConsumerSet(object): - """Groups consumers to listen on together on a single connection.""" - - def __init__(self, connection, consumer_list): - self.consumer_list = set(consumer_list) - self.consumer_set = None - self.enabled = True - self.init(connection) - - def init(self, conn): - if not conn: - conn = Connection.instance(new=True) - if self.consumer_set: - self.consumer_set.close() - self.consumer_set = messaging.ConsumerSet(conn) - for consumer in self.consumer_list: - consumer.connection = conn - # consumer.backend is set for us - self.consumer_set.add_consumer(consumer) - - def reconnect(self): - self.init(None) - - def wait(self, limit=None): - running = True - while running: - it = self.consumer_set.iterconsume(limit=limit) - if not it: - break - while True: - try: - it.next() - except StopIteration: - return - except greenlet.GreenletExit: - running = False - break - except Exception as e: - LOG.exception(_("Exception while processing consumer")) - self.reconnect() - # Break to outer loop - break - - def close(self): - self.consumer_set.close() - - -class Publisher(messaging.Publisher): - """Publisher base class.""" - pass - - -class TopicPublisher(Publisher): - """Publishes messages on a specific topic.""" - - exchange_type = 'topic' - - def __init__(self, connection=None, topic='broadcast'): - self.routing_key = topic - self.exchange = FLAGS.control_exchange - self.durable = FLAGS.rabbit_durable_queues - super(TopicPublisher, self).__init__(connection=connection) - - -class FanoutPublisher(Publisher): - """Publishes messages to a fanout exchange.""" - - exchange_type = 'fanout' - - def __init__(self, topic, connection=None): - self.exchange = '%s_fanout' % topic - self.queue = '%s_fanout' % topic - self.durable = False - self.auto_delete = True - LOG.info(_('Creating "%(exchange)s" fanout exchange'), - dict(exchange=self.exchange)) - super(FanoutPublisher, self).__init__(connection=connection) - - -class DirectConsumer(Consumer): - """Consumes messages directly on a channel specified by msg_id.""" - - exchange_type = 'direct' - - def __init__(self, connection=None, msg_id=None): - self.queue = msg_id - self.routing_key = msg_id - self.exchange = msg_id - self.durable = False - self.auto_delete = True - self.exclusive = True - super(DirectConsumer, self).__init__(connection=connection) - - -class DirectPublisher(Publisher): - """Publishes messages directly on a channel specified by msg_id.""" - - exchange_type = 'direct' - - def __init__(self, connection=None, msg_id=None): - self.routing_key = msg_id - self.exchange = msg_id - self.durable = False - self.auto_delete = True - super(DirectPublisher, self).__init__(connection=connection) - - -def msg_reply(msg_id, reply=None, failure=None): - """Sends a reply or an error on the channel signified by msg_id. - - Failure should be a sys.exc_info() tuple. 
- - """ - if failure: - message = str(failure[1]) - tb = traceback.format_exception(*failure) - LOG.error(_("Returning exception %s to caller"), message) - LOG.error(tb) - failure = (failure[0].__name__, str(failure[1]), tb) - - with ConnectionPool.item() as conn: - publisher = DirectPublisher(connection=conn, msg_id=msg_id) - try: - publisher.send({'result': reply, 'failure': failure}) - except TypeError: - publisher.send( - {'result': dict((k, repr(v)) - for k, v in reply.__dict__.iteritems()), - 'failure': failure}) - - publisher.close() - - -def _unpack_context(msg): - """Unpack context from msg.""" - context_dict = {} - for key in list(msg.keys()): - # NOTE(vish): Some versions of python don't like unicode keys - # in kwargs. - key = str(key) - if key.startswith('_context_'): - value = msg.pop(key) - context_dict[key[9:]] = value - context_dict['msg_id'] = msg.pop('_msg_id', None) - LOG.debug(_('unpacked context: %s'), context_dict) - return RpcContext.from_dict(context_dict) - - -def _pack_context(msg, context): - """Pack context into msg. - - Values for message keys need to be less than 255 chars, so we pull - context out into a bunch of separate keys. If we want to support - more arguments in rabbit messages, we may want to do the same - for args at some point. - - """ - context_d = dict([('_context_%s' % key, value) - for (key, value) in context.to_dict().iteritems()]) - msg.update(context_d) - - -class RpcContext(context.RequestContext): - def __init__(self, *args, **kwargs): - msg_id = kwargs.pop('msg_id', None) - self.msg_id = msg_id - super(RpcContext, self).__init__(*args, **kwargs) - - def reply(self, *args, **kwargs): - msg_reply(self.msg_id, *args, **kwargs) - - -def multicall(context, topic, msg): - """Make a call that returns multiple times.""" - LOG.debug(_('Making asynchronous call on %s ...'), topic) - msg_id = uuid.uuid4().hex - msg.update({'_msg_id': msg_id}) - LOG.debug(_('MSG_ID is %s') % (msg_id)) - _pack_context(msg, context) - - con_conn = ConnectionPool.get() - consumer = DirectConsumer(connection=con_conn, msg_id=msg_id) - wait_msg = MulticallWaiter(consumer) - consumer.register_callback(wait_msg) - - publisher = TopicPublisher(connection=con_conn, topic=topic) - publisher.send(msg) - publisher.close() - - return wait_msg - - -class MulticallWaiter(object): - def __init__(self, consumer): - self._consumer = consumer - self._results = queue.Queue() - self._closed = False - - def close(self): - self._closed = True - self._consumer.close() - ConnectionPool.put(self._consumer.connection) - - def __call__(self, data, message): - """Acks message and sets result.""" - message.ack() - if data['failure']: - self._results.put(RemoteError(*data['failure'])) - else: - self._results.put(data['result']) - - def __iter__(self): - return self.wait() - - def wait(self): - while True: - rv = None - while rv is None and not self._closed: - try: - rv = self._consumer.fetch(enable_callbacks=True) - except Exception: - self.close() - raise - time.sleep(0.01) - - result = self._results.get() - if isinstance(result, Exception): - self.close() - raise result - if result == None: - self.close() - raise StopIteration - yield result - - -def call(context, topic, msg): - """Sends a message on a topic and wait for a response.""" - rv = multicall(context, topic, msg) - # NOTE(vish): return the last result from the multicall - rv = list(rv) - if not rv: - return - return rv[-1] - - -def cast(context, topic, msg): - """Sends a message on a topic without waiting for a response.""" - 
LOG.debug(_('Making asynchronous cast on %s...'), topic) - _pack_context(msg, context) - with ConnectionPool.item() as conn: - publisher = TopicPublisher(connection=conn, topic=topic) - publisher.send(msg) - publisher.close() - - -def fanout_cast(context, topic, msg): - """Sends a message on a fanout exchange without waiting for a response.""" - LOG.debug(_('Making asynchronous fanout cast...')) - _pack_context(msg, context) - with ConnectionPool.item() as conn: - publisher = FanoutPublisher(topic, connection=conn) - publisher.send(msg) - publisher.close() - - -def generic_response(message_data, message): - """Logs a result and exits.""" - LOG.debug(_('response %s'), message_data) - message.ack() - sys.exit(0) - - -def send_message(topic, message, wait=True): - """Sends a message for testing.""" - msg_id = uuid.uuid4().hex - message.update({'_msg_id': msg_id}) - LOG.debug(_('topic is %s'), topic) - LOG.debug(_('message %s'), message) - - if wait: - consumer = messaging.Consumer(connection=Connection.instance(), - queue=msg_id, - exchange=msg_id, - auto_delete=True, - exchange_type='direct', - routing_key=msg_id) - consumer.register_callback(generic_response) - - publisher = messaging.Publisher(connection=Connection.instance(), - exchange=FLAGS.control_exchange, - durable=FLAGS.rabbit_durable_queues, - exchange_type='topic', - routing_key=topic) - publisher.send(message) - publisher.close() - - if wait: - consumer.wait() - consumer.close() - - -if __name__ == '__main__': - # You can send messages from the command line using - # topic and a json string representing a dictionary - # for the method - send_message(sys.argv[1], json.loads(sys.argv[2])) diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py new file mode 100644 index 000000000..529f98722 --- /dev/null +++ b/nova/rpc/impl_carrot.py @@ -0,0 +1,607 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""AMQP-based RPC. + +Queues have consumers and publishers. + +No fan-out support yet. 
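For orientation, every message this module moves is a plain dict naming the method to invoke and its arguments; a sketch of the envelope, with 'reboot_instance' as a hypothetical method name:

    msg = {'method': 'reboot_instance', 'args': {'instance_id': 42}}
    # cast() and call() add '_context_*' keys via _pack_context();
    # call()/multicall() also stamp a '_msg_id' that routes the reply
    # back over a direct exchange.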
+ +""" + +import json +import sys +import time +import traceback +import types +import uuid + +from carrot import connection as carrot_connection +from carrot import messaging +from eventlet import greenpool +from eventlet import pools +from eventlet import queue +import greenlet + +from nova import context +from nova import exception +from nova import fakerabbit +from nova import flags +from nova import log as logging +from nova import utils +from nova.rpc.common import RemoteError, LOG + + +FLAGS = flags.FLAGS +flags.DEFINE_integer('rpc_thread_pool_size', 1024, + 'Size of RPC thread pool') +flags.DEFINE_integer('rpc_conn_pool_size', 30, + 'Size of RPC connection pool') + + +class Connection(carrot_connection.BrokerConnection): + """Connection instance object.""" + + @classmethod + def instance(cls, new=True): + """Returns the instance.""" + if new or not hasattr(cls, '_instance'): + params = dict(hostname=FLAGS.rabbit_host, + port=FLAGS.rabbit_port, + ssl=FLAGS.rabbit_use_ssl, + userid=FLAGS.rabbit_userid, + password=FLAGS.rabbit_password, + virtual_host=FLAGS.rabbit_virtual_host) + + if FLAGS.fake_rabbit: + params['backend_cls'] = fakerabbit.Backend + + # NOTE(vish): magic is fun! + # pylint: disable=W0142 + if new: + return cls(**params) + else: + cls._instance = cls(**params) + return cls._instance + + @classmethod + def recreate(cls): + """Recreates the connection instance. + + This is necessary to recover from some network errors/disconnects. + + """ + try: + del cls._instance + except AttributeError, e: + # The _instance stuff is for testing purposes. Usually we don't use + # it. So don't freak out if it doesn't exist. + pass + return cls.instance() + + +class Pool(pools.Pool): + """Class that implements a Pool of Connections.""" + + # TODO(comstud): Timeout connections not used in a while + def create(self): + LOG.debug('Creating new connection') + return Connection.instance(new=True) + +# Create a ConnectionPool to use for RPC calls. We'll order the +# pool as a stack (LIFO), so that we can potentially loop through and +# timeout old unused connections at some point +ConnectionPool = Pool( + max_size=FLAGS.rpc_conn_pool_size, + order_as_stack=True) + + +class Consumer(messaging.Consumer): + """Consumer base class. + + Contains methods for connecting the fetch method to async loops. + + """ + + def __init__(self, *args, **kwargs): + for i in xrange(FLAGS.rabbit_max_retries): + if i > 0: + time.sleep(FLAGS.rabbit_retry_interval) + try: + super(Consumer, self).__init__(*args, **kwargs) + self.failed_connection = False + break + except Exception as e: # Catching all because carrot sucks + fl_host = FLAGS.rabbit_host + fl_port = FLAGS.rabbit_port + fl_intv = FLAGS.rabbit_retry_interval + LOG.error(_('AMQP server on %(fl_host)s:%(fl_port)d is' + ' unreachable: %(e)s. Trying again in %(fl_intv)d' + ' seconds.') % locals()) + self.failed_connection = True + if self.failed_connection: + LOG.error(_('Unable to connect to AMQP server ' + 'after %d tries. 
Shutting down.'), + FLAGS.rabbit_max_retries) + sys.exit(1) + + def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): + """Wraps the parent fetch with some logic for failed connection.""" + # TODO(vish): the logic for failed connections and logging should be + # refactored into some sort of connection manager object + try: + if self.failed_connection: + # NOTE(vish): connection is defined in the parent class, we can + # recreate it as long as we create the backend too + # pylint: disable=W0201 + self.connection = Connection.recreate() + self.backend = self.connection.create_backend() + self.declare() + return super(Consumer, self).fetch(no_ack, + auto_ack, + enable_callbacks) + if self.failed_connection: + LOG.error(_('Reconnected to queue')) + self.failed_connection = False + # NOTE(vish): This is catching all errors because we really don't + # want exceptions to be logged 10 times a second if some + # persistent failure occurs. + except Exception, e: # pylint: disable=W0703 + if not self.failed_connection: + LOG.exception(_('Failed to fetch message from queue: %s' % e)) + self.failed_connection = True + + def attach_to_eventlet(self): + """Only needed for unit tests!""" + timer = utils.LoopingCall(self.fetch, enable_callbacks=True) + timer.start(0.1) + return timer + + +class AdapterConsumer(Consumer): + """Calls methods on a proxy object based on method and args.""" + + def __init__(self, connection=None, topic='broadcast', proxy=None): + LOG.debug(_('Initializing the Adapter Consumer for %s') % topic) + self.proxy = proxy + self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size) + super(AdapterConsumer, self).__init__(connection=connection, + topic=topic) + self.register_callback(self.process_data) + + def process_data(self, message_data, message): + """Consumer callback to call a method on a proxy object. + + Parses the message for validity and fires off a thread to call the + proxy object method. + + Message data should be a dictionary with two keys: + method: string representing the method to call + args: dictionary of arg: value + + Example: {'method': 'echo', 'args': {'value': 42}} + + """ + LOG.debug(_('received %s') % message_data) + # This will be popped off in _unpack_context + msg_id = message_data.get('_msg_id', None) + ctxt = _unpack_context(message_data) + + method = message_data.get('method') + args = message_data.get('args', {}) + message.ack() + if not method: + # NOTE(vish): we may not want to ack here, but that means that bad + # messages stay in the queue indefinitely, so for now + # we just log the message and send an error string + # back to the caller + LOG.warn(_('no method for message: %s') % message_data) + if msg_id: + msg_reply(msg_id, + _('No method for message: %s') % message_data) + return + self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args) + + @exception.wrap_exception() + def _process_data(self, msg_id, ctxt, method, args): + """Thread that magically looks for a method on the proxy + object and calls it. + """ + + node_func = getattr(self.proxy, str(method)) + node_args = dict((str(k), v) for k, v in args.iteritems()) + # NOTE(vish): magic is fun! + try: + rval = node_func(context=ctxt, **node_args) + if msg_id: + # Check if the result was a generator + if isinstance(rval, types.GeneratorType): + for x in rval: + msg_reply(msg_id, x, None) + else: + msg_reply(msg_id, rval, None) + + # This final None tells multicall that it is done.
+ msg_reply(msg_id, None, None) + elif isinstance(rval, types.GeneratorType): + # NOTE(vish): this iterates through the generator + list(rval) + except Exception as e: + logging.exception('Exception during message handling') + if msg_id: + msg_reply(msg_id, None, sys.exc_info()) + return + + +class TopicAdapterConsumer(AdapterConsumer): + """Consumes messages on a specific topic.""" + + exchange_type = 'topic' + + def __init__(self, connection=None, topic='broadcast', proxy=None): + self.queue = topic + self.routing_key = topic + self.exchange = FLAGS.control_exchange + self.durable = FLAGS.rabbit_durable_queues + super(TopicAdapterConsumer, self).__init__(connection=connection, + topic=topic, proxy=proxy) + + +class FanoutAdapterConsumer(AdapterConsumer): + """Consumes messages from a fanout exchange.""" + + exchange_type = 'fanout' + + def __init__(self, connection=None, topic='broadcast', proxy=None): + self.exchange = '%s_fanout' % topic + self.routing_key = topic + unique = uuid.uuid4().hex + self.queue = '%s_fanout_%s' % (topic, unique) + self.durable = False + # Fanout creates unique queue names, so we should auto-remove + # them when done, so they're not left around on restart. + # Also, we're the only one that should be consuming. exclusive + # implies auto_delete, so we'll just set that. + self.exclusive = True + LOG.info(_('Created "%(exchange)s" fanout exchange ' + 'with "%(key)s" routing key'), + dict(exchange=self.exchange, key=self.routing_key)) + super(FanoutAdapterConsumer, self).__init__(connection=connection, + topic=topic, proxy=proxy) + + +class ConsumerSet(object): + """Groups consumers to listen together on a single connection.""" + + def __init__(self, connection, consumer_list): + self.consumer_list = set(consumer_list) + self.consumer_set = None + self.enabled = True + self.init(connection) + + def init(self, conn): + if not conn: + conn = Connection.instance(new=True) + if self.consumer_set: + self.consumer_set.close() + self.consumer_set = messaging.ConsumerSet(conn) + for consumer in self.consumer_list: + consumer.connection = conn + # consumer.backend is set for us + self.consumer_set.add_consumer(consumer) + + def reconnect(self): + self.init(None) + + def wait(self, limit=None): + running = True + while running: + it = self.consumer_set.iterconsume(limit=limit) + if not it: + break + while True: + try: + it.next() + except StopIteration: + return + except greenlet.GreenletExit: + running = False + break + except Exception as e: + LOG.exception(_("Exception while processing consumer")) + self.reconnect() + # Break to outer loop + break + + def close(self): + self.consumer_set.close() + + +class Publisher(messaging.Publisher): + """Publisher base class.""" + pass + + +class TopicPublisher(Publisher): + """Publishes messages on a specific topic.""" + + exchange_type = 'topic' + + def __init__(self, connection=None, topic='broadcast'): + self.routing_key = topic + self.exchange = FLAGS.control_exchange + self.durable = FLAGS.rabbit_durable_queues + super(TopicPublisher, self).__init__(connection=connection) + + +class FanoutPublisher(Publisher): + """Publishes messages to a fanout exchange.""" + + exchange_type = 'fanout' + + def __init__(self, topic, connection=None): + self.exchange = '%s_fanout' % topic + self.queue = '%s_fanout' % topic + self.durable = False + self.auto_delete = True + LOG.info(_('Creating "%(exchange)s" fanout exchange'), + dict(exchange=self.exchange)) + super(FanoutPublisher, self).__init__(connection=connection) + + +class
DirectConsumer(Consumer): + """Consumes messages directly on a channel specified by msg_id.""" + + exchange_type = 'direct' + + def __init__(self, connection=None, msg_id=None): + self.queue = msg_id + self.routing_key = msg_id + self.exchange = msg_id + self.durable = False + self.auto_delete = True + self.exclusive = True + super(DirectConsumer, self).__init__(connection=connection) + + +class DirectPublisher(Publisher): + """Publishes messages directly on a channel specified by msg_id.""" + + exchange_type = 'direct' + + def __init__(self, connection=None, msg_id=None): + self.routing_key = msg_id + self.exchange = msg_id + self.durable = False + self.auto_delete = True + super(DirectPublisher, self).__init__(connection=connection) + + +def msg_reply(msg_id, reply=None, failure=None): + """Sends a reply or an error on the channel signified by msg_id. + + Failure should be a sys.exc_info() tuple. + + """ + if failure: + message = str(failure[1]) + tb = traceback.format_exception(*failure) + LOG.error(_("Returning exception %s to caller"), message) + LOG.error(tb) + failure = (failure[0].__name__, str(failure[1]), tb) + + with ConnectionPool.item() as conn: + publisher = DirectPublisher(connection=conn, msg_id=msg_id) + try: + publisher.send({'result': reply, 'failure': failure}) + except TypeError: + publisher.send( + {'result': dict((k, repr(v)) + for k, v in reply.__dict__.iteritems()), + 'failure': failure}) + + publisher.close() + + +def _unpack_context(msg): + """Unpack context from msg.""" + context_dict = {} + for key in list(msg.keys()): + # NOTE(vish): Some versions of python don't like unicode keys + # in kwargs. + key = str(key) + if key.startswith('_context_'): + value = msg.pop(key) + context_dict[key[9:]] = value + context_dict['msg_id'] = msg.pop('_msg_id', None) + LOG.debug(_('unpacked context: %s'), context_dict) + return RpcContext.from_dict(context_dict) + + +def _pack_context(msg, context): + """Pack context into msg. + + Values for message keys need to be less than 255 chars, so we pull + context out into a bunch of separate keys. If we want to support + more arguments in rabbit messages, we may want to do the same + for args at some point. 
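Concretely, packing hoists each context field into a top-level '_context_'-prefixed message key. A small sketch, assuming a plain dict stands in for the result of context.to_dict():

    ctxt_dict = {'user_id': 'fake_user', 'project_id': 'fake_project'}
    msg = {'method': 'echo', 'args': {'value': 42}}
    msg.update(('_context_' + key, val) for key, val in ctxt_dict.items())
    # msg now also carries '_context_user_id' and '_context_project_id';
    # _unpack_context() pops them back out on the consuming side
    # (key[9:] works because '_context_' is nine characters long).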
+ + """ + context_d = dict([('_context_%s' % key, value) + for (key, value) in context.to_dict().iteritems()]) + msg.update(context_d) + + +class RpcContext(context.RequestContext): + def __init__(self, *args, **kwargs): + msg_id = kwargs.pop('msg_id', None) + self.msg_id = msg_id + super(RpcContext, self).__init__(*args, **kwargs) + + def reply(self, *args, **kwargs): + msg_reply(self.msg_id, *args, **kwargs) + + +def multicall(context, topic, msg): + """Make a call that returns multiple times.""" + LOG.debug(_('Making asynchronous call on %s ...'), topic) + msg_id = uuid.uuid4().hex + msg.update({'_msg_id': msg_id}) + LOG.debug(_('MSG_ID is %s') % (msg_id)) + _pack_context(msg, context) + + con_conn = ConnectionPool.get() + consumer = DirectConsumer(connection=con_conn, msg_id=msg_id) + wait_msg = MulticallWaiter(consumer) + consumer.register_callback(wait_msg) + + publisher = TopicPublisher(connection=con_conn, topic=topic) + publisher.send(msg) + publisher.close() + + return wait_msg + + +class MulticallWaiter(object): + def __init__(self, consumer): + self._consumer = consumer + self._results = queue.Queue() + self._closed = False + + def close(self): + self._closed = True + self._consumer.close() + ConnectionPool.put(self._consumer.connection) + + def __call__(self, data, message): + """Acks message and sets result.""" + message.ack() + if data['failure']: + self._results.put(RemoteError(*data['failure'])) + else: + self._results.put(data['result']) + + def __iter__(self): + return self.wait() + + def wait(self): + while True: + rv = None + while rv is None and not self._closed: + try: + rv = self._consumer.fetch(enable_callbacks=True) + except Exception: + self.close() + raise + time.sleep(0.01) + + result = self._results.get() + if isinstance(result, Exception): + self.close() + raise result + if result == None: + self.close() + raise StopIteration + yield result + + +def create_consumer(conn, topic, proxy, fanout=False): + """Create a consumer that calls methods in the proxy""" + if fanout: + return FanoutAdapterConsumer( + connection=conn, + topic=topic, + proxy=proxy) + else: + return TopicAdapterConsumer( + connection=conn, + topic=topic, + proxy=proxy) + + +def call(context, topic, msg): + """Sends a message on a topic and wait for a response.""" + rv = multicall(context, topic, msg) + # NOTE(vish): return the last result from the multicall + rv = list(rv) + if not rv: + return + return rv[-1] + + +def cast(context, topic, msg): + """Sends a message on a topic without waiting for a response.""" + LOG.debug(_('Making asynchronous cast on %s...'), topic) + _pack_context(msg, context) + with ConnectionPool.item() as conn: + publisher = TopicPublisher(connection=conn, topic=topic) + publisher.send(msg) + publisher.close() + + +def fanout_cast(context, topic, msg): + """Sends a message on a fanout exchange without waiting for a response.""" + LOG.debug(_('Making asynchronous fanout cast...')) + _pack_context(msg, context) + with ConnectionPool.item() as conn: + publisher = FanoutPublisher(topic, connection=conn) + publisher.send(msg) + publisher.close() + + +def generic_response(message_data, message): + """Logs a result and exits.""" + LOG.debug(_('response %s'), message_data) + message.ack() + sys.exit(0) + + +def send_message(topic, message, wait=True): + """Sends a message for testing.""" + msg_id = uuid.uuid4().hex + message.update({'_msg_id': msg_id}) + LOG.debug(_('topic is %s'), topic) + LOG.debug(_('message %s'), message) + + if wait: + consumer = 
messaging.Consumer(connection=Connection.instance(), + queue=msg_id, + exchange=msg_id, + auto_delete=True, + exchange_type='direct', + routing_key=msg_id) + consumer.register_callback(generic_response) + + publisher = messaging.Publisher(connection=Connection.instance(), + exchange=FLAGS.control_exchange, + durable=FLAGS.rabbit_durable_queues, + exchange_type='topic', + routing_key=topic) + publisher.send(message) + publisher.close() + + if wait: + consumer.wait() + consumer.close() + + +if __name__ == '__main__': + # You can send messages from the command line using + # topic and a json string representing a dictionary + # for the method + send_message(sys.argv[1], json.loads(sys.argv[2])) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py new file mode 100644 index 000000000..e609227c9 --- /dev/null +++ b/nova/rpc/impl_kombu.py @@ -0,0 +1,426 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import flags +from nova import log as logging + +import kombu +import kombu.entity +import kombu.messaging +import kombu.connection +import itertools +import sys +import time +import uuid + + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.rpc') + + +class QueueBase(object): + """Queue base class.""" + + def __init__(self, channel, callback, tag, **kwargs): + """Init the queue. + + 'channel' is the amqp channel to use + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + queue name, exchange name, and other kombu options are + passed in here as a dictionary. + """ + self.callback = callback + self.tag = str(tag) + self.kwargs = kwargs + self.queue = None + self.reconnect(channel) + + def reconnect(self, channel): + """Re-create the queue after a rabbit reconnect""" + self.channel = channel + self.kwargs['channel'] = channel + self.queue = kombu.entity.Queue(**self.kwargs) + self.queue.declare() + + def consume(self, *args, **kwargs): + """Consume from this queue. + If a callback is specified in kwargs, use that. Otherwise, + use the callback passed during __init__() + + The callback will be called if a message was read off of the + queue. + + If kwargs['nowait'] is False, then this call will block until + the broker has confirmed the consumer registration.
+ + Messages will automatically be acked if the callback doesn't + raise an exception + """ + + options = {'consumer_tag': self.tag} + options['nowait'] = kwargs.get('nowait', False) + callback = kwargs.get('callback', self.callback) + if not callback: + raise ValueError("No callback defined") + + def _callback(raw_message): + message = self.channel.message_to_python(raw_message) + callback(message.payload) + message.ack() + + self.queue.consume(*args, callback=_callback, **options) + + def cancel(self): + """Cancel the consuming from the queue, if it has started""" + try: + self.queue.cancel(self.tag) + except KeyError, e: + # NOTE(comstud): Kludge to get around an amqplib bug + if str(e) != "u'%s'" % self.tag: + raise + self.queue = None + + +class DirectQueue(QueueBase): + """Queue/consumer class for 'direct'""" + + def __init__(self, channel, msg_id, callback, tag, **kwargs): + """Init a 'direct' queue. + + 'channel' is the amqp channel to use + 'msg_id' is the msg_id to listen on + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + Other kombu options may be passed + """ + # Default options + options = {'durable': False, + 'auto_delete': True, + 'exclusive': True} + options.update(kwargs) + exchange = kombu.entity.Exchange( + name=msg_id, + type='direct', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(DirectQueue, self).__init__( + channel, + callback, + tag, + name=msg_id, + exchange=exchange, + routing_key=msg_id, + **options) + + +class TopicQueue(QueueBase): + """Queue/consumer class for 'topic'""" + + def __init__(self, channel, topic, callback, tag, **kwargs): + """Init a 'topic' queue. + + 'channel' is the amqp channel to use + 'topic' is the topic to listen on + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + Other kombu options may be passed + """ + # Default options + options = {'durable': FLAGS.rabbit_durable_queues, + 'auto_delete': False, + 'exclusive': False} + options.update(kwargs) + exchange = kombu.entity.Exchange( + name=FLAGS.control_exchange, + type='topic', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(TopicQueue, self).__init__( + channel, + callback, + tag, + name=topic, + exchange=exchange, + routing_key=topic, + **options) + + +class FanoutQueue(QueueBase): + """Queue/consumer class for 'fanout'""" + + def __init__(self, channel, topic, callback, tag, **kwargs): + """Init a 'fanout' queue.
+ + 'channel' is the amqp channel to use + 'topic' is the topic to listen on + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + Other kombu options may be passed + """ + unique = uuid.uuid4().hex + exchange_name = '%s_fanout' % topic + queue_name = '%s_fanout_%s' % (topic, unique) + + # Default options + options = {'durable': False, + 'auto_delete': True, + 'exclusive': True} + options.update(kwargs) + exchange = kombu.entity.Exchange( + name=exchange_name, + type='fanout', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(FanoutQueue, self).__init__( + channel, + callback, + tag, + name=queue_name, + exchange=exchange, + routing_key=topic, + **options) + + +class Publisher(object): + """Base Publisher class""" + + def __init__(self, channel, exchange_name, routing_key, **kwargs): + """Init the Publisher class with the exchange_name, routing_key, + and other options + """ + self.exchange_name = exchange_name + self.routing_key = routing_key + self.kwargs = kwargs + self.reconnect(channel) + + def reconnect(self, channel): + """Re-establish the Producer after a rabbit reconnection""" + self.exchange = kombu.entity.Exchange(name=self.exchange_name, + **self.kwargs) + self.producer = kombu.messaging.Producer(exchange=self.exchange, + channel=channel, routing_key=self.routing_key) + + def send(self, msg): + """Send a message""" + self.producer.publish(msg) + + +class DirectPublisher(Publisher): + """Publisher class for 'direct'""" + def __init__(self, channel, msg_id, **kwargs): + """init a 'direct' publisher. + + Kombu options may be passed as keyword args to override defaults + """ + + options = {'durable': False, + 'auto_delete': True, + 'exclusive': True} + options.update(kwargs) + super(DirectPublisher, self).__init__(channel, + msg_id, + msg_id, + type='direct', + **options) + + +class TopicPublisher(Publisher): + """Publisher class for 'topic'""" + def __init__(self, channel, topic, **kwargs): + """init a 'topic' publisher. + + Kombu options may be passed as keyword args to override defaults + """ + options = {'durable': FLAGS.rabbit_durable_queues, + 'auto_delete': False, + 'exclusive': False} + options.update(kwargs) + super(TopicPublisher, self).__init__(channel, + FLAGS.control_exchange, + topic, + type='topic', + **options) + + +class FanoutPublisher(Publisher): + """Publisher class for 'fanout'""" + def __init__(self, channel, topic, **kwargs): + """init a 'fanout' publisher. 
+ + Kombu options may be passed as keyword args to override defaults + """ + options = {'durable': False, + 'auto_delete': True, + 'exclusive': True} + options.update(kwargs) + super(FanoutPublisher, self).__init__(channel, + '%s_fanout' % topic, + None, + type='fanout', + **options) + + +class Connection(object): + """Connection instance object.""" + + def __init__(self): + self.queues = [] + self.max_retries = FLAGS.rabbit_max_retries + self.interval_start = FLAGS.rabbit_retry_interval + self.interval_stepping = 0 + self.interval_max = FLAGS.rabbit_retry_interval + + self.params = dict(hostname=FLAGS.rabbit_host, + port=FLAGS.rabbit_port, + userid=FLAGS.rabbit_userid, + password=FLAGS.rabbit_password, + virtual_host=FLAGS.rabbit_virtual_host) + if FLAGS.fake_rabbit: + self.params['transport'] = 'memory' + self.connection = None + self.reconnect() + + def reconnect(self): + """Handles reconnecting and re-establishing queues""" + if self.connection: + try: + self.connection.close() + except self.connection.connection_errors: + pass + time.sleep(1) + self.connection = kombu.connection.Connection(**self.params) + self.queue_num = itertools.count(1) + + try: + self.connection.ensure_connection(errback=self.connect_error, + max_retries=self.max_retries, + interval_start=self.interval_start, + interval_step=self.interval_stepping, + interval_max=self.interval_max) + except self.connection.connection_errors, e: + err_str = str(e) + max_retries = FLAGS.rabbit_max_retries + LOG.error(_('Unable to connect to AMQP server ' + 'after %(max_retries)d tries: %(err_str)s') % locals()) + # NOTE(comstud): Original carrot code exits after so many + # attempts, but I wonder if we should re-try indefinitely + sys.exit(1) + LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d' % + self.params)) + self.channel = self.connection.channel() + for consumer in self.queues: + consumer.reconnect(self.channel) + if self.queues: + LOG.debug(_("Re-established AMQP queues")) + + def get_channel(self): + """Convenience call for bin/clear_rabbit_queues""" + return self.channel + + def connect_error(self, exc, interval): + """Callback when there are connection re-tries by kombu""" + info = self.params.copy() + info['intv'] = interval + info['e'] = exc + LOG.error(_('AMQP server on %(hostname)s:%(port)d is' + ' unreachable: %(e)s.
Trying again in %(intv)d' + ' seconds.') % info) + + def close(self): + """Close/release this connection""" + self.connection.release() + self.connection = None + + def reset(self): + """Reset a connection so it can be used again""" + self.channel.close() + self.channel = self.connection.channel() + self.queues = [] + + def create_queue(self, queue_cls, topic, callback): + """Create a queue using the class that was passed in and + add it to our list of queues used for consuming + """ + self.queues.append(queue_cls(self.channel, topic, callback, + self.queue_num.next())) + + def consume(self, limit=None): + """Consume from all queues""" + while True: + try: + queues_head = self.queues[:-1] + queues_tail = self.queues[-1] + for queue in queues_head: + queue.consume(nowait=True) + queues_tail.consume(nowait=False) + + for iteration in itertools.count(0): + if limit and iteration >= limit: + raise StopIteration + yield self.connection.drain_events() + except self.connection.connection_errors, e: + LOG.exception(_('Failed to consume message from queue: ' + '%s' % str(e))) + self.reconnect() + + def publisher_send(self, cls, topic, msg): + """Send to a publisher based on the publisher class""" + while True: + publisher = None + try: + publisher = cls(self.channel, topic) + publisher.send(msg) + return + except self.connection.connection_errors, e: + LOG.exception(_('Failed to publish message %s' % str(e))) + try: + self.reconnect() + if publisher: + publisher.reconnect(self.channel) + except self.connection.connection_errors, e: + pass + + def direct_consumer(self, topic, callback): + """Create a 'direct' queue. + In nova's use, this is generally a msg_id queue used for + responses for call/multicall + """ + self.create_queue(DirectQueue, topic, callback) + + def topic_consumer(self, topic, callback=None): + """Create a 'topic' queue.""" + self.create_queue(TopicQueue, topic, callback) + + def fanout_consumer(self, topic, callback): + """Create a 'fanout' queue""" + self.create_queue(FanoutQueue, topic, callback) + + def direct_send(self, msg_id, msg): + """Send a 'direct' message""" + self.publisher_send(DirectPublisher, msg_id, msg) + + def topic_send(self, topic, msg): + """Send a 'topic' message""" + self.publisher_send(TopicPublisher, topic, msg) + + def fanout_send(self, topic, msg): + """Send a 'fanout' message""" + self.publisher_send(FanoutPublisher, topic, msg) -- cgit From 9cb46c48682657039173447f9689e15ed3ce15af Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 26 Aug 2011 15:59:15 -0700 Subject: more work done to restore original rpc interfaces. 
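The central pattern in this commit is checking connections out of a pool through a context manager; a rough usage sketch, assuming 'compute' as a placeholder topic and an importable ConnectionContext:

    with ConnectionContext() as conn:
        conn.topic_send('compute', {'method': 'echo', 'args': {'value': 42}})
    # On exit, _done() resets a pooled connection and returns it to the
    # pool instead of closing it.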
--- nova/rpc/FIXME | 2 + nova/rpc/__init__.py | 3 +- nova/rpc/impl_kombu.py | 307 ++++++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 306 insertions(+), 6 deletions(-) create mode 100644 nova/rpc/FIXME diff --git a/nova/rpc/FIXME b/nova/rpc/FIXME new file mode 100644 index 000000000..704081802 --- /dev/null +++ b/nova/rpc/FIXME @@ -0,0 +1,2 @@ +Move some code duplication between carrot/kombu into common.py +The other FIXMEs in __init__.py and impl_kombu.py diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index f102cf0fa..9371c2ab3 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -27,7 +27,7 @@ flags.DEFINE_string('rpc_backend', "The messaging module to use, defaults to carrot.") impl_table = {'kombu': 'nova.rpc.impl_kombu', - 'amqp': 'nova.rpc.impl_kombu'} + 'amqp': 'nova.rpc.impl_kombu', 'carrot': 'nova.rpc.impl_carrot'} @@ -46,6 +46,7 @@ def create_consumer(conn, topic, proxy, fanout=False): def create_consumer_set(conn, consumers): + # FIXME(comstud): replace however necessary return RPCIMPL.ConsumerSet(connection=conn, consumer_list=consumers) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index e609227c9..a222bb885 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -30,6 +30,11 @@ import uuid FLAGS = flags.FLAGS LOG = logging.getLogger('nova.rpc') +flags.DEFINE_integer('rpc_conn_pool_size', 30, + 'Size of RPC connection pool') +flags.DEFINE_integer('rpc_thread_pool_size', 1024, + 'Size of RPC thread pool') + class QueueBase(object): """Queue base class.""" @@ -298,6 +303,16 @@ class Connection(object): self.connection = None self.reconnect() + @classmethod + def instance(cls, new=True): + """Returns the instance.""" + if new or not hasattr(cls, '_instance'): + if new: + return cls() + else: + cls._instance = cls() + return cls._instance + def reconnect(self): """Handles reconnecting and re-estblishing queues""" if self.connection: @@ -359,8 +374,10 @@ class Connection(object): """Create a queue using the class that was passed in and add it to our list of queues used for consuming """ - self.queues.append(queue_cls(self.channel, topic, callback, - self.queue_num.next())) + queue = queue_cls(self.channel, topic, callback, + self.queue_num.next()) + self.queues.append(queue) + return queue def consume(self, limit=None): """Consume from all queues""" @@ -403,15 +420,15 @@ class Connection(object): In nova's use, this is generally a msg_id queue used for responses for call/multicall """ - self.create_queue(DirectQueue, topic, callback) + return self.create_queue(DirectQueue, topic, callback) def topic_consumer(self, topic, callback=None): """Create a 'topic' queue.""" - self.create_queue(TopicQueue, topic, callback) + return self.create_queue(TopicQueue, topic, callback) def fanout_consumer(self, topic, callback): """Create a 'fanout' queue""" - self.create_queue(FanoutQueue, topic, callback) + return self.create_queue(FanoutQueue, topic, callback) def direct_send(self, msg_id, msg): """Send a 'direct' message""" @@ -424,3 +441,283 @@ class Connection(object): def fanout_send(self, topic, msg): """Send a 'fanout' message""" self.publisher_send(FanoutPublisher, topic, msg) + + +class Pool(pools.Pool): + """Class that implements a Pool of Connections.""" + + # TODO(comstud): Timeout connections not used in a while + def create(self): + LOG.debug('Creating new connection') + return RPCIMPL.Connection() + +# Create a ConnectionPool to use for RPC calls. 
We'll order the +# pool as a stack (LIFO), so that we can potentially loop through and +# timeout old unused connections at some point +ConnectionPool = Pool( + max_size=FLAGS.rpc_conn_pool_size, + order_as_stack=True) + + +class ConnectionContext(object): + def __init__(self, pooled=True): + self.connection = None + if pooled: + self.connection = ConnectionPool.get() + else: + self.connection = RPCIMPL.Connection() + self.pooled = pooled + + def __enter__(self): + return self + + def _done(self): + if self.connection: + if self.pooled: + # Reset the connection so it's ready for the next caller + # to grab from the pool + self.connection.reset() + ConnectionPool.put(self.connection) + else: + try: + self.connection.close() + except Exception: + # There's apparently a bug in kombu 'memory' transport + # which causes an assert failure. + # But, we probably want to ignore all exceptions when + # trying to close a connection, anyway... + pass + self.connection = None + + def __exit__(self, t, v, tb): + """End of 'with' statement. We're done here.""" + self._done() + + def __del__(self): + """Put Connection back into the pool if this ConnectionContext + is being deleted + """ + self._done() + + def close(self): + self._done() + + def __getattr__(self, key): + if self.connection: + return getattr(self.connection, key) + else: + raise exception.InvalidRPCConnectionReuse() + + +class ProxyCallback(object): + """Calls methods on a proxy object based on method and args.""" + + def __init__(self, proxy): + self.proxy = proxy + self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size) + + def __call__(self, message_data): + """Consumer callback to call a method on a proxy object. + + Parses the message for validity and fires off a thread to call the + proxy object method. + + Message data should be a dictionary with two keys: + method: string representing the method to call + args: dictionary of arg: value + + Example: {'method': 'echo', 'args': {'value': 42}} + + """ + LOG.debug(_('received %s') % message_data) + ctxt = _unpack_context(message_data) + method = message_data.get('method') + args = message_data.get('args', {}) + if not method: + LOG.warn(_('no method for message: %s') % message_data) + ctxt.reply(_('No method for message: %s') % message_data) + return + self.pool.spawn_n(self._process_data, ctxt, method, args) + + @exception.wrap_exception() + def _process_data(self, ctxt, method, args): + """Thread that magically looks for a method on the proxy + object and calls it. + """ + + node_func = getattr(self.proxy, str(method)) + node_args = dict((str(k), v) for k, v in args.iteritems()) + # NOTE(vish): magic is fun! + try: + rval = node_func(context=ctxt, **node_args) + # Check if the result was a generator + if isinstance(rval, types.GeneratorType): + for x in rval: + ctxt.reply(x, None) + else: + ctxt.reply(rval, None) + # This final None tells multicall that it is done. + ctxt.reply(None, None) + except Exception as e: + logging.exception('Exception during message handling') + ctxt.reply(None, sys.exc_info()) + return + + +def _unpack_context(msg): + """Unpack context from msg.""" + context_dict = {} + for key in list(msg.keys()): + # NOTE(vish): Some versions of python don't like unicode keys + # in kwargs.
+ key = str(key) + if key.startswith('_context_'): + value = msg.pop(key) + context_dict[key[9:]] = value + context_dict['msg_id'] = msg.pop('_msg_id', None) + LOG.debug(_('unpacked context: %s'), context_dict) + return RpcContext.from_dict(context_dict) + + +def _pack_context(msg, context): + """Pack context into msg. + + Values for message keys need to be less than 255 chars, so we pull + context out into a bunch of separate keys. If we want to support + more arguments in rabbit messages, we may want to do the same + for args at some point. + + """ + context_d = dict([('_context_%s' % key, value) + for (key, value) in context.to_dict().iteritems()]) + msg.update(context_d) + + +class RpcContext(context.RequestContext): + def __init__(self, *args, **kwargs): + msg_id = kwargs.pop('msg_id', None) + self.msg_id = msg_id + super(RpcContext, self).__init__(*args, **kwargs) + + def reply(self, *args, **kwargs): + if self.msg_id: + msg_reply(self.msg_id, *args, **kwargs) + + +class MulticallWaiter(object): + def __init__(self, connection): + self._connection = connection + self._iterator = connection.consume() + self._result = None + self._done = False + + def done(self): + self._done = True + self._connection = None + + def __call__(self, data): + """The consume() callback will call this. Store the result.""" + if data['failure']: + self._result = RemoteError(*data['failure']) + else: + self._result = data['result'] + + def __iter__(self): + if self._done: + raise StopIteration + while True: + self._iterator.next() + result = self._result + if isinstance(result, Exception): + self.done() + raise result + if result == None: + self.done() + raise StopIteration + yield result + + +def create_consumer(conn, topic, proxy, fanout=False): + """Create a consumer that calls a method in a proxy object""" + if fanout: + return conn.fanout_consumer(topic, ProxyCallback(proxy)) + else: + return conn.topic_consumer(topic, ProxyCallback(proxy)) + + +def create_consumer_set(conn, consumers): + # FIXME(comstud): Replace this however necessary + # Returns an object that you can call .wait() on to consume + # all queues? + # Needs to have a .close() which will stop consuming? + # Needs to also have an attach_to_eventlet method for tests? + raise NotImplemented + + +def multicall(context, topic, msg): + """Make a call that returns multiple times.""" + # Can't use 'with' for multicall, as it returns an iterator + # that will continue to use the connection. 
When it's done, + # connection.close() will get called which will put it back into + # the pool + LOG.debug(_('Making asynchronous call on %s ...'), topic) + msg_id = uuid.uuid4().hex + msg.update({'_msg_id': msg_id}) + LOG.debug(_('MSG_ID is %s') % (msg_id)) + _pack_context(msg, context) + + conn = ConnectionContext() + wait_msg = MulticallWaiter(conn) + conn.direct_consumer(msg_id, wait_msg) + conn.topic_send(topic, msg) + + return wait_msg + + +def call(context, topic, msg): + """Sends a message on a topic and wait for a response.""" + rv = multicall(context, topic, msg) + # NOTE(vish): return the last result from the multicall + rv = list(rv) + if not rv: + return + return rv[-1] + + +def cast(context, topic, msg): + """Sends a message on a topic without waiting for a response.""" + LOG.debug(_('Making asynchronous cast on %s...'), topic) + _pack_context(msg, context) + with ConnectionContext() as conn: + conn.topic_send(topic, msg) + + +def fanout_cast(context, topic, msg): + """Sends a message on a fanout exchange without waiting for a response.""" + LOG.debug(_('Making asynchronous fanout cast...')) + _pack_context(msg, context) + with ConnectionContext() as conn: + conn.fanout_send(topic, msg) + + +def msg_reply(msg_id, reply=None, failure=None): + """Sends a reply or an error on the channel signified by msg_id. + + Failure should be a sys.exc_info() tuple. + + """ + with ConnectionContext() as conn: + if failure: + message = str(failure[1]) + tb = traceback.format_exception(*failure) + LOG.error(_("Returning exception %s to caller"), message) + LOG.error(tb) + failure = (failure[0].__name__, str(failure[1]), tb) + + try: + msg = {'result': reply, 'failure': failure} + except TypeError: + msg = {'result': dict((k, repr(v)) + for k, v in reply.__dict__.iteritems()), + 'failure': failure} + conn.direct_send(msg_id, msg) -- cgit From 1c5031ca6f89de4ac8a7dac271aff9942fc9602a Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 26 Aug 2011 16:00:50 -0700 Subject: more fixes --- nova/rpc/impl_kombu.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index a222bb885..cfef421c6 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -15,7 +15,7 @@ # under the License. from nova import flags -from nova import log as logging +from nova.rpc.common import RemoteError, LOG import kombu import kombu.entity @@ -28,7 +28,6 @@ import uuid FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.rpc') flags.DEFINE_integer('rpc_conn_pool_size', 30, 'Size of RPC connection pool') @@ -559,7 +558,7 @@ class ProxyCallback(object): # This final None tells multicall that it is done. 
ctxt.reply(None, None) except Exception as e: - logging.exception('Exception during message handling') + LOG.exception('Exception during message handling') ctxt.reply(None, sys.exc_info()) return -- cgit From 3c835ebe4bb6e0aa61da2a2e44a6b19bc92fc72a Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 26 Aug 2011 16:04:34 -0700 Subject: flag for kombu connection backoff on retries --- nova/flags.py | 1 + nova/rpc/impl_kombu.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/flags.py b/nova/flags.py index 95000df1b..ac70386e7 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -303,6 +303,7 @@ DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') DEFINE_string('rabbit_password', 'guest', 'rabbit password') DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval') +DEFINE_integer('rabbit_interval_stepping', 2, 'rabbit connection retry backoff in seconds') DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') DEFINE_boolean('rabbit_durable_queues', False, 'use durable queues') diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index cfef421c6..65199808e 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -289,7 +289,7 @@ class Connection(object): self.queues = [] self.max_retries = FLAGS.rabbit_max_retries self.interval_start = FLAGS.rabbit_retry_interval - self.interval_stepping = 0 + self.interval_stepping = FLAGS.rabbit_interval_stepping self.interval_max = FLAGS.rabbit_retry_interval self.params = dict(hostname=FLAGS.rabbit_host, -- cgit From 400427ab786779109d49b27eda2fe9e246503dd6 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 26 Aug 2011 16:17:40 -0700 Subject: use db layer for aggregation --- nova/api/openstack/contrib/simple_tenant_usage.py | 72 ++++++-------------- nova/db/api.py | 11 +++- nova/db/sqlalchemy/api.py | 19 ++++-- .../openstack/contrib/test_simple_tenant_usage.py | 77 +++++++++------------- 4 files changed, 70 insertions(+), 109 deletions(-) diff --git a/nova/api/openstack/contrib/simple_tenant_usage.py b/nova/api/openstack/contrib/simple_tenant_usage.py index 5f4218237..16e712815 100644 --- a/nova/api/openstack/contrib/simple_tenant_usage.py +++ b/nova/api/openstack/contrib/simple_tenant_usage.py @@ -31,35 +31,7 @@ from webob import exc FLAGS = flags.FLAGS -INSTANCE_FIELDS = ['id', - 'image_ref', - 'project_id', - 'user_id', - 'display_name', - 'state_description', - 'instance_type_id', - 'launched_at', - 'terminated_at'] - - class SimpleTenantUsageController(object): - def _get_instances_for_time_period(self, period_start, period_stop, - tenant_id): - tenant_clause = '' - if tenant_id: - tenant_clause = " and project_id='%s'" % tenant_id - - conn = get_session().connection() - rows = conn.execute("select %s from instances where \ - (terminated_at is NULL or terminated_at > '%s') \ - and (launched_at < '%s') %s" %\ - (','.join(INSTANCE_FIELDS), - period_start.isoformat(' '),\ - period_stop.isoformat(' '), - tenant_clause)).fetchall() - - return rows - def _hours_for(self, instance, period_start, period_stop): launched_at = instance['launched_at'] terminated_at = instance['terminated_at'] @@ -99,62 +71,58 @@ class SimpleTenantUsageController(object): def _tenant_usages_for_period(self, context, period_start, period_stop, tenant_id=None, detailed=True): - rows = self._get_instances_for_time_period(period_start, - period_stop, 
- tenant_id) + instances = db.instance_get_active_by_window(context, + period_start, + period_stop, + tenant_id, + fast=True) + from nova import log as logging + logging.info(instances) rval = {} flavors = {} - for row in rows: + for instance in instances: info = {} - for i in range(len(INSTANCE_FIELDS)): - info[INSTANCE_FIELDS[i]] = row[i] - info['hours'] = self._hours_for(info, period_start, period_stop) - flavor_type = info['instance_type_id'] + info['hours'] = self._hours_for(instance, + period_start, + period_stop) + flavor_type = instance['instance_type_id'] if not flavors.get(flavor_type): try: flavors[flavor_type] = db.instance_type_get(context, - info['instance_type_id']) + flavor_type) except exception.InstanceTypeNotFound: # can't bill if there is no instance type continue flavor = flavors[flavor_type] - info['name'] = info['display_name'] - del(info['display_name']) + info['name'] = instance['display_name'] info['memory_mb'] = flavor['memory_mb'] info['local_gb'] = flavor['local_gb'] info['vcpus'] = flavor['vcpus'] - info['tenant_id'] = info['project_id'] - del(info['project_id']) + info['tenant_id'] = instance['project_id'] info['flavor'] = flavor['name'] - del(info['instance_type_id']) - info['started_at'] = info['launched_at'] - del(info['launched_at']) + info['started_at'] = instance['launched_at'] - info['ended_at'] = info['terminated_at'] - del(info['terminated_at']) + info['ended_at'] = instance['terminated_at'] if info['ended_at']: info['state'] = 'terminated' else: - info['state'] = info['state_description'] - - del(info['state_description']) + info['state'] = instance['state_description'] now = datetime.utcnow() if info['state'] == 'terminated': - delta = self._parse_datetime(info['ended_at'])\ - - self._parse_datetime(info['started_at']) + delta = info['ended_at'] - info['started_at'] else: - delta = now - self._parse_datetime(info['started_at']) + delta = now - info['started_at'] info['uptime'] = delta.days * 24 * 60 + delta.seconds diff --git a/nova/db/api.py b/nova/db/api.py index 3bb9b4970..f443239c1 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -495,9 +495,14 @@ def instance_get_all_by_filters(context, filters): return IMPL.instance_get_all_by_filters(context, filters) -def instance_get_active_by_window(context, begin, end=None): - """Get instances active during a certain time window.""" - return IMPL.instance_get_active_by_window(context, begin, end) +def instance_get_active_by_window(context, begin, end=None, + project_id=None, fast=False): + """Get instances active during a certain time window. + + Setting fast to true will stop all joinedloads. 
+ Specifying a project_id will filter for a certain project.""" + return IMPL.instance_get_active_by_window(context, begin, end, + project_id, fast) def instance_get_all_by_user(context, user_id): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index d1fbf8cab..2d50f458f 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1307,20 +1307,25 @@ def instance_get_all_by_filters(context, filters): @require_admin_context -def instance_get_active_by_window(context, begin, end=None): +def instance_get_active_by_window(context, begin, end=None, + project_id=None, fast=False): """Return instances that were continuously active over the given window""" session = get_session() - query = session.query(models.Instance).\ - options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ips.network')).\ - options(joinedload('instance_type')).\ - filter(models.Instance.launched_at < begin) + query = session.query(models.Instance) + if not fast: + query = query.options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ips.network')).\ + options(joinedload('instance_type')) + + query = query.filter(models.Instance.launched_at < begin) if end: query = query.filter(or_(models.Instance.terminated_at == None, models.Instance.terminated_at > end)) else: query = query.filter(models.Instance.terminated_at == None) + if project_id: + query = query.filter_by(project_id=project_id) return query.all() diff --git a/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py index d20e36aaf..2bd619820 100644 --- a/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py +++ b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py @@ -23,13 +23,8 @@ from nova import context from nova import db from nova import flags from nova import test -from nova.compute import instance_types -from nova.db.sqlalchemy import models -from nova.db.sqlalchemy import session from nova.tests.api.openstack import fakes -from webob import exc -from nova.api.openstack.contrib import simple_tenant_usage FLAGS = flags.FLAGS @@ -44,27 +39,6 @@ STOP = datetime.datetime.utcnow() START = STOP - datetime.timedelta(hours=HOURS) -def fake_get_session(): - class FakeFetcher(object): - def fetchall(fetcher_self): - # return 10 rows, 2 tenants, 5 servers each, each run for 1 day - return [get_fake_db_row(START, - STOP, - x, - "faketenant_%s" % (x / SERVERS)) - for x in xrange(TENANTS * SERVERS)] - - class FakeConn(object): - def execute(self, query): - return FakeFetcher() - - class FakeSession(object): - def connection(self): - return FakeConn() - - return FakeSession() - - def fake_instance_type_get(context, instance_type_id): return {'id': 1, 'vcpus': VCPUS, @@ -74,25 +48,32 @@ def fake_instance_type_get(context, instance_type_id): 'fakeflavor'} -def get_fake_db_row(start, end, instance_id, tenant_id): - return [instance_id, - '1', - tenant_id, - 'fakeuser', - 'name', - 'state', - 1, - start, - None] +def get_fake_db_instance(start, end, instance_id, tenant_id): + return {'id': instance_id, + 'image_ref': '1', + 'project_id': tenant_id, + 'user_id': 'fakeuser', + 'display_name': 'name', + 'state_description': 'state', + 'instance_type_id': 1, + 'launched_at': start, + 'terminated_at': end} + +def fake_instance_get_active_by_window(context, begin, end, project_id, fast): + return [get_fake_db_instance(START, + 
STOP, + x, + "faketenant_%s" % (x / SERVERS)) + for x in xrange(TENANTS * SERVERS)] class SimpleTenantUsageTest(test.TestCase): def setUp(self): super(SimpleTenantUsageTest, self).setUp() - self.stubs.Set(session, "get_session", - fake_get_session) self.stubs.Set(db, "instance_type_get", fake_instance_type_get) + self.stubs.Set(db, "instance_get_active_by_window", + fake_instance_get_active_by_window) self.admin_context = context.RequestContext('fakeadmin_0', 'faketenant_0', is_admin=True) @@ -104,13 +85,9 @@ class SimpleTenantUsageTest(test.TestCase): is_admin=False) FLAGS.allow_admin_api = True - def test_verify_db_fields_exist_in_instance_model(self): - for field in simple_tenant_usage.INSTANCE_FIELDS: - self.assertTrue(field in models.Instance.__table__.columns) - def test_verify_index(self): req = webob.Request.blank( - '/v1.1/os-simple-tenant-usage?start=%s&end=%s' % + '/v1.1/123/os-simple-tenant-usage?start=%s&end=%s' % (START.isoformat(), STOP.isoformat())) req.method = "GET" req.headers["content-type"] = "application/json" @@ -121,6 +98,8 @@ class SimpleTenantUsageTest(test.TestCase): self.assertEqual(res.status_int, 200) res_dict = json.loads(res.body) usages = res_dict['tenant_usages'] + from nova import log as logging + logging.warn(usages) for i in xrange(TENANTS): self.assertEqual(int(usages[i]['total_hours']), SERVERS * HOURS) @@ -134,7 +113,8 @@ class SimpleTenantUsageTest(test.TestCase): def test_verify_detailed_index(self): req = webob.Request.blank( - '/v1.1/os-simple-tenant-usage?detailed=1&start=%s&end=%s' % + '/v1.1/123/os-simple-tenant-usage?' + 'detailed=1&start=%s&end=%s' % (START.isoformat(), STOP.isoformat())) req.method = "GET" req.headers["content-type"] = "application/json" @@ -151,7 +131,8 @@ class SimpleTenantUsageTest(test.TestCase): def test_verify_index_fails_for_nonadmin(self): req = webob.Request.blank( - '/v1.1/os-simple-tenant-usage?detailed=1&start=%s&end=%s' % + '/v1.1/123/os-simple-tenant-usage?' 
+ 'detailed=1&start=%s&end=%s' % (START.isoformat(), STOP.isoformat())) req.method = "GET" req.headers["content-type"] = "application/json" @@ -161,7 +142,8 @@ class SimpleTenantUsageTest(test.TestCase): def test_verify_show(self): req = webob.Request.blank( - '/v1.1/os-simple-tenant-usage/faketenant_0?start=%s&end=%s' % + '/v1.1/faketenant_0/os-simple-tenant-usage/' + 'faketenant_0?start=%s&end=%s' % (START.isoformat(), STOP.isoformat())) req.method = "GET" req.headers["content-type"] = "application/json" @@ -179,7 +161,8 @@ class SimpleTenantUsageTest(test.TestCase): def test_verify_show_cant_view_other_tenant(self): req = webob.Request.blank( - '/v1.1/os-simple-tenant-usage/faketenant_0?start=%s&end=%s' % + '/v1.1/faketenant_1/os-simple-tenant-usage/' + 'faketenant_0?start=%s&end=%s' % (START.isoformat(), STOP.isoformat())) req.method = "GET" req.headers["content-type"] = "application/json" -- cgit From 470b9dc73c5e27ef8716436fe22e9f32dbdffd28 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 26 Aug 2011 17:40:22 -0700 Subject: add tests to verify NotFound exceptions are wrapped with the proper ids --- nova/tests/api/ec2/__init__.py | 19 +++++ nova/tests/api/ec2/test_middleware.py | 130 ++++++++++++++++++++++++++++++++++ nova/tests/test_cloud.py | 7 -- nova/tests/test_middleware.py | 85 ---------------------- 4 files changed, 149 insertions(+), 92 deletions(-) create mode 100644 nova/tests/api/ec2/__init__.py create mode 100644 nova/tests/api/ec2/test_middleware.py delete mode 100644 nova/tests/test_middleware.py diff --git a/nova/tests/api/ec2/__init__.py b/nova/tests/api/ec2/__init__.py new file mode 100644 index 000000000..6dab802f2 --- /dev/null +++ b/nova/tests/api/ec2/__init__.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Openstack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from nova.tests import * diff --git a/nova/tests/api/ec2/test_middleware.py b/nova/tests/api/ec2/test_middleware.py new file mode 100644 index 000000000..295f6c4ea --- /dev/null +++ b/nova/tests/api/ec2/test_middleware.py @@ -0,0 +1,130 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
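+# NOTE(editor): a minimal sketch of the webob.dec.wsgify pattern the
+# conditional_forbid() helper below relies on -- it wraps a function
+# taking a webob.Request into a WSGI application, and a plain string
+# return value becomes the 200 response body:
+#
+#     @webob.dec.wsgify
+#     def app(req):
+#         return 'OK'
+#
+#     res = webob.Request.blank('/').get_response(app)
+#     assert res.status_int == 200 and res.body == 'OK'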
+ +import webob +import webob.dec +import webob.exc + +from nova.api import ec2 +from nova import context +from nova import exception +from nova import flags +from nova import test +from nova import utils + +from xml.etree.ElementTree import fromstring as xml_to_tree + +FLAGS = flags.FLAGS + + +@webob.dec.wsgify +def conditional_forbid(req): + """Helper wsgi app returns 403 if param 'die' is 1.""" + if 'die' in req.params and req.params['die'] == '1': + raise webob.exc.HTTPForbidden() + return 'OK' + + +class LockoutTestCase(test.TestCase): + """Test case for the Lockout middleware.""" + def setUp(self): # pylint: disable=C0103 + super(LockoutTestCase, self).setUp() + utils.set_time_override() + self.lockout = ec2.Lockout(conditional_forbid) + + def tearDown(self): # pylint: disable=C0103 + utils.clear_time_override() + super(LockoutTestCase, self).tearDown() + + def _send_bad_attempts(self, access_key, num_attempts=1): + """Fail x.""" + for i in xrange(num_attempts): + req = webob.Request.blank('/?AWSAccessKeyId=%s&die=1' % access_key) + self.assertEqual(req.get_response(self.lockout).status_int, 403) + + def _is_locked_out(self, access_key): + """Sends a test request to see if key is locked out.""" + req = webob.Request.blank('/?AWSAccessKeyId=%s' % access_key) + return (req.get_response(self.lockout).status_int == 403) + + def test_lockout(self): + self._send_bad_attempts('test', FLAGS.lockout_attempts) + self.assertTrue(self._is_locked_out('test')) + + def test_timeout(self): + self._send_bad_attempts('test', FLAGS.lockout_attempts) + self.assertTrue(self._is_locked_out('test')) + utils.advance_time_seconds(FLAGS.lockout_minutes * 60) + self.assertFalse(self._is_locked_out('test')) + + def test_multiple_keys(self): + self._send_bad_attempts('test1', FLAGS.lockout_attempts) + self.assertTrue(self._is_locked_out('test1')) + self.assertFalse(self._is_locked_out('test2')) + utils.advance_time_seconds(FLAGS.lockout_minutes * 60) + self.assertFalse(self._is_locked_out('test1')) + self.assertFalse(self._is_locked_out('test2')) + + def test_window_timeout(self): + self._send_bad_attempts('test', FLAGS.lockout_attempts - 1) + self.assertFalse(self._is_locked_out('test')) + utils.advance_time_seconds(FLAGS.lockout_window * 60) + self._send_bad_attempts('test', FLAGS.lockout_attempts - 1) + self.assertFalse(self._is_locked_out('test')) + + +class ExecutorTestCase(test.TestCase): + def setUp(self): + super(ExecutorTestCase, self).setUp() + self.executor = ec2.Executor() + + def _execute(self, invoke): + class Fake(object): + pass + fake_ec2_request = Fake() + fake_ec2_request.invoke = invoke + + fake_wsgi_request = Fake() + + fake_wsgi_request.environ = { + 'nova.context': context.get_admin_context(), + 'ec2.request': fake_ec2_request, + } + return self.executor(fake_wsgi_request) + + def _extract_message(self, result): + tree = xml_to_tree(result.body) + return tree.findall('./Errors')[0].find('Error/Message').text + + def test_instance_not_found(self): + def not_found(context): + raise exception.InstanceNotFound(instance_id=5) + result = self._execute(not_found) + self.assertIn('i-00000005', self._extract_message(result)) + + def test_snapshot_not_found(self): + def not_found(context): + raise exception.SnapshotNotFound(snapshot_id=5) + result = self._execute(not_found) + self.assertIn('snap-00000005', self._extract_message(result)) + + def test_volume_not_found(self): + def not_found(context): + raise exception.VolumeNotFound(volume_id=5) + result = self._execute(not_found) + 
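+        # NOTE(editor): as with the instance and snapshot cases above,
+        # the integer id is expected back zero-padded to eight hex
+        # digits, i.e. 'vol-%08x' % 5 == 'vol-00000005'.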
self.assertIn('vol-00000005', self._extract_message(result)) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 0793784f8..1bf12a06f 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -86,13 +86,6 @@ class CloudTestCase(test.TestCase): self.stubs.Set(rpc, 'cast', finish_cast) - def tearDown(self): - networks = db.project_get_networks(self.context, self.project_id, - associate=False) - for network in networks: - db.network_disassociate(self.context, network['id']) - super(CloudTestCase, self).tearDown() - def _create_key(self, name): # NOTE(vish): create depends on pool, so just call helper directly return cloud._gen_key(self.context, self.context.user_id, name) diff --git a/nova/tests/test_middleware.py b/nova/tests/test_middleware.py deleted file mode 100644 index 40d117c45..000000000 --- a/nova/tests/test_middleware.py +++ /dev/null @@ -1,85 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import webob -import webob.dec -import webob.exc - -from nova.api import ec2 -from nova import flags -from nova import test -from nova import utils - - -FLAGS = flags.FLAGS - - -@webob.dec.wsgify -def conditional_forbid(req): - """Helper wsgi app returns 403 if param 'die' is 1.""" - if 'die' in req.params and req.params['die'] == '1': - raise webob.exc.HTTPForbidden() - return 'OK' - - -class LockoutTestCase(test.TestCase): - """Test case for the Lockout middleware.""" - def setUp(self): # pylint: disable=C0103 - super(LockoutTestCase, self).setUp() - utils.set_time_override() - self.lockout = ec2.Lockout(conditional_forbid) - - def tearDown(self): # pylint: disable=C0103 - utils.clear_time_override() - super(LockoutTestCase, self).tearDown() - - def _send_bad_attempts(self, access_key, num_attempts=1): - """Fail x.""" - for i in xrange(num_attempts): - req = webob.Request.blank('/?AWSAccessKeyId=%s&die=1' % access_key) - self.assertEqual(req.get_response(self.lockout).status_int, 403) - - def _is_locked_out(self, access_key): - """Sends a test request to see if key is locked out.""" - req = webob.Request.blank('/?AWSAccessKeyId=%s' % access_key) - return (req.get_response(self.lockout).status_int == 403) - - def test_lockout(self): - self._send_bad_attempts('test', FLAGS.lockout_attempts) - self.assertTrue(self._is_locked_out('test')) - - def test_timeout(self): - self._send_bad_attempts('test', FLAGS.lockout_attempts) - self.assertTrue(self._is_locked_out('test')) - utils.advance_time_seconds(FLAGS.lockout_minutes * 60) - self.assertFalse(self._is_locked_out('test')) - - def test_multiple_keys(self): - self._send_bad_attempts('test1', FLAGS.lockout_attempts) - self.assertTrue(self._is_locked_out('test1')) - self.assertFalse(self._is_locked_out('test2')) - utils.advance_time_seconds(FLAGS.lockout_minutes * 60) - 
self.assertFalse(self._is_locked_out('test1')) - self.assertFalse(self._is_locked_out('test2')) - - def test_window_timeout(self): - self._send_bad_attempts('test', FLAGS.lockout_attempts - 1) - self.assertFalse(self._is_locked_out('test')) - utils.advance_time_seconds(FLAGS.lockout_window * 60) - self._send_bad_attempts('test', FLAGS.lockout_attempts - 1) - self.assertFalse(self._is_locked_out('test')) -- cgit From 19cb3672f2849fe659173631f7f81ed489f1ea7e Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Fri, 26 Aug 2011 17:46:47 -0700 Subject: v1.0 of server create injects first users keypair --- nova/api/openstack/create_instance_helper.py | 1 - nova/api/openstack/servers.py | 13 +++++++++++++ nova/tests/api/openstack/test_servers.py | 6 ++++-- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index e7428bf41..d82cb534f 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -19,7 +19,6 @@ import base64 from webob import exc from xml.dom import minidom -from nova import db from nova import exception from nova import flags from nova import log as logging diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index ad563b771..f288f2228 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -22,6 +22,7 @@ from xml.dom import minidom import webob from nova import compute +from nova import db from nova import exception from nova import flags from nova import log as logging @@ -563,6 +564,18 @@ class ControllerV10(Controller): raise exc.HTTPNotFound() return webob.Response(status_int=202) + def create(self, req, body): + """ Creates a new server for a given user """ + # note(ja): v1.0 injects the first keypair for the project for testing + if 'server' in body and not 'key_name' in body['server']: + context = req.environ["nova.context"] + keypairs = db.key_pair_get_all_by_user(context.elevated(), + context.user_id) + if keypairs: + body['server']['key_name'] = keypairs[0]['name'] + + return super(ControllerV10, self).create(req, body) + def _image_ref_from_req_data(self, data): return data['server']['imageId'] diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index bb531f462..c54bead49 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -1761,7 +1761,7 @@ class ServersTest(test.TestCase): body = dict(server=dict( name='server_test', imageRef=image_href, flavorRef=flavor_ref, key_name='nonexistentkey')) - req = webob.Request.blank('/v1.1/servers') + req = webob.Request.blank('/v1.1/fake/servers') req.method = 'POST' req.body = json.dumps(body) req.headers["content-type"] = "application/json" @@ -1781,7 +1781,7 @@ class ServersTest(test.TestCase): body = dict(server=dict( name='server_test', imageRef=image_href, flavorRef=flavor_ref, key_name='mykey')) - req = webob.Request.blank('/v1.1/servers') + req = webob.Request.blank('/v1.1/fake/servers') req.method = 'POST' req.body = json.dumps(body) req.headers["content-type"] = "application/json" @@ -3793,6 +3793,7 @@ class ServersViewBuilderV11Test(test.TestCase): "created": "2010-10-10T12:00:00Z", "progress": 0, "name": "test_server", + "key_name": "", "status": "BUILD", "hostId": '', "image": { @@ -3848,6 +3849,7 @@ class ServersViewBuilderV11Test(test.TestCase): "created": "2010-10-10T12:00:00Z", "progress": 0, "name": "test_server", + 
"key_name": "", "status": "BUILD", "hostId": '', "image": { -- cgit From 3a4ee30de0f619b7046e90ce9b6978e3a6dd20a2 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sat, 27 Aug 2011 14:07:55 -0700 Subject: Default rabbit max_retries to forever Modify carrot code to handle retry backoffs and obey max_retries = forever Fix some kombu issues from cut-n-paste Service should make sure to close the RPC connection --- nova/flags.py | 6 +++--- nova/rpc/impl_carrot.py | 28 +++++++++++++++++++++------- nova/rpc/impl_kombu.py | 35 +++++++++++++++++------------------ nova/service.py | 6 ++++++ 4 files changed, 47 insertions(+), 28 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index ac70386e7..e09b4721a 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -302,9 +302,9 @@ DEFINE_bool('rabbit_use_ssl', False, 'connect over SSL') DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') DEFINE_string('rabbit_password', 'guest', 'rabbit password') DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') -DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval') -DEFINE_integer('rabbit_interval_stepping', 2, 'rabbit connection retry backoff in seconds') -DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts') +DEFINE_integer('rabbit_retry_interval', 1, 'rabbit connection retry interval to start') +DEFINE_integer('rabbit_retry_backoff', 2, 'rabbit connection retry backoff in seconds') +DEFINE_integer('rabbit_max_retries', 0, 'maximum rabbit connection attempts (0=try forever)') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') DEFINE_boolean('rabbit_durable_queues', False, 'use durable queues') DEFINE_list('enabled_apis', ['ec2', 'osapi'], diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index 529f98722..117489bc6 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -119,25 +119,34 @@ class Consumer(messaging.Consumer): """ def __init__(self, *args, **kwargs): - for i in xrange(FLAGS.rabbit_max_retries): - if i > 0: - time.sleep(FLAGS.rabbit_retry_interval) + max_retries = FALGS.rabbit_max_retries + sleep_time = FLAGS.rabbit_retry_interval + tries = 0 + while True: + tries += 1 + if tries > 1: + time.sleep(sleep_time) + # backoff for next retry attempt.. if there is one + sleep_time += FLAGS.rabbit_retry_backoff + if sleep_time > 30: + sleep_time = 30 try: super(Consumer, self).__init__(*args, **kwargs) self.failed_connection = False break except Exception as e: # Catching all because carrot sucks + self.failed_connection = True + if max_retries > 0 and tries == max_retries: + break fl_host = FLAGS.rabbit_host fl_port = FLAGS.rabbit_port - fl_intv = FLAGS.rabbit_retry_interval + fl_intv = sleep_time LOG.error(_('AMQP server on %(fl_host)s:%(fl_port)d is' ' unreachable: %(e)s. Trying again in %(fl_intv)d' ' seconds.') % locals()) - self.failed_connection = True if self.failed_connection: LOG.error(_('Unable to connect to AMQP server ' - 'after %d tries. Shutting down.'), - FLAGS.rabbit_max_retries) + 'after %(tries)d tries. 
Shutting down.') % locals()) sys.exit(1) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): @@ -520,6 +529,11 @@ class MulticallWaiter(object): yield result +def create_connection(new=True): + """Create a connection""" + return Connection.instance(new=new) + + def create_consumer(conn, topic, proxy, fanout=False): """Create a consumer that calls methods in the proxy""" if fanout: diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index 65199808e..db839dd2a 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -288,9 +288,13 @@ class Connection(object): def __init__(self): self.queues = [] self.max_retries = FLAGS.rabbit_max_retries + # Try forever? + if self.max_retries <= 0: + self.max_retries = None self.interval_start = FLAGS.rabbit_retry_interval - self.interval_stepping = FLAGS.rabbit_interval_stepping - self.interval_max = FLAGS.rabbit_retry_interval + self.interval_stepping = FLAGS.rabbit_retry_backoff + # max retry-interval = 30 seconds + self.interval_max = 30 self.params = dict(hostname=FLAGS.rabbit_host, port=FLAGS.rabbit_port, @@ -302,16 +306,6 @@ class Connection(object): self.connection = None self.reconnect() - @classmethod - def instance(cls, new=True): - """Returns the instance.""" - if new or not hasattr(cls, '_instance'): - if new: - return cls() - else: - cls._instance = cls() - return cls._instance - def reconnect(self): """Handles reconnecting and re-estblishing queues""" if self.connection: @@ -330,12 +324,12 @@ class Connection(object): interval_step=self.interval_stepping, interval_max=self.interval_max) except self.connection.connection_errors, e: + # We should only get here if max_retries is set. We'll go + # ahead and exit in this case. err_str = str(e) - max_retries = FLAGS.rabbit_max_retries + max_retries = self.max_retries LOG.error(_('Unable to connect to AMQP server ' 'after %(max_retries)d tries: %(err_str)s') % locals()) - # NOTE(comstud): Original carrot code exits after so many - # attempts, but I wonder if we should re-try indefinitely sys.exit(1) LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d' % self.params)) @@ -448,7 +442,7 @@ class Pool(pools.Pool): # TODO(comstud): Timeout connections not used in a while def create(self): LOG.debug('Creating new connection') - return RPCIMPL.Connection() + return Connection() # Create a ConnectionPool to use for RPC calls. We'll order the # pool as a stack (LIFO), so that we can potentially loop through and @@ -464,7 +458,7 @@ class ConnectionContext(object): if pooled: self.connection = ConnectionPool.get() else: - self.connection = RPCIMPL.Connection() + self.connection = Connection() self.pooled = pooled def __enter__(self): @@ -636,6 +630,11 @@ class MulticallWaiter(object): yield result +def create_connection(new=True): + """Create a connection""" + return ConnectionContext(pooled=not new) + + def create_consumer(conn, topic, proxy, fanout=False): """Create a consumer that calls a method in a proxy object""" if fanout: @@ -649,7 +648,7 @@ def create_consumer_set(conn, consumers): # Returns an object that you can call .wait() on to consume # all queues? # Needs to have a .close() which will stop consuming? - # Needs to also have an attach_to_eventlet method for tests? + # Needs to also have an method for tests? 
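    # NOTE(editor): this raises the NotImplemented *constant* rather
    # than NotImplementedError, which would itself be a TypeError if
    # the stub were ever hit; it only survives two more commits before
    # the consumer rework deletes it.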
raise NotImplemented diff --git a/nova/service.py b/nova/service.py index 959e79052..a872a36ee 100644 --- a/nova/service.py +++ b/nova/service.py @@ -242,6 +242,12 @@ class Service(object): self.consumer_set_thread.wait() except greenlet.GreenletExit: pass + # Try to shut the connection down, but if we get any sort of + # errors, go ahead and ignore them.. as we're shutting down anyway + try: + self.conn.close() + except Exception: + pass for x in self.timers: try: x.stop() -- cgit From 4faaf9c37d030d68cfea818d396963e3ed7deeaa Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sat, 27 Aug 2011 21:33:14 -0700 Subject: fix FALGS typo --- nova/rpc/impl_carrot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index 117489bc6..40097e10e 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -119,7 +119,7 @@ class Consumer(messaging.Consumer): """ def __init__(self, *args, **kwargs): - max_retries = FALGS.rabbit_max_retries + max_retries = FLAGS.rabbit_max_retries sleep_time = FLAGS.rabbit_retry_interval tries = 0 while True: -- cgit From 256cb956abeff85f3cddce499b488dd112c4137d Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sun, 28 Aug 2011 17:33:11 -0700 Subject: start to rework some consumer stuff --- nova/rpc/impl_kombu.py | 127 +++++++++++++++++++++++++++++-------------------- nova/service.py | 24 ++-------- 2 files changed, 81 insertions(+), 70 deletions(-) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index db839dd2a..01871606c 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -35,11 +35,11 @@ flags.DEFINE_integer('rpc_thread_pool_size', 1024, 'Size of RPC thread pool') -class QueueBase(object): - """Queue base class.""" +class ConsumerBase(object): + """Consumer base class.""" def __init__(self, channel, callback, tag, **kwargs): - """Init the queue. + """Declare a queue on an amqp channel. 'channel' is the amqp channel to use 'callback' is the callback to call when messages are received @@ -55,20 +55,21 @@ class QueueBase(object): self.reconnect(channel) def reconnect(self, channel): - """Re-create the queue after a rabbit reconnect""" + """Re-declare the queue after a rabbit reconnect""" self.channel = channel self.kwargs['channel'] = channel self.queue = kombu.entity.Queue(**self.kwargs) self.queue.declare() def consume(self, *args, **kwargs): - """Consume from this queue. + """Actually declare the consumer on the amqp channel. This will + start the flow of messages from the queue. Using the + Connection.iterconsume() iterator will process the messages, + calling the appropriate callback. + If a callback is specified in kwargs, use that. Otherwise, use the callback passed during __init__() - The callback will be called if a message was read off of the - queue. - If kwargs['nowait'] is True, then this call will block until a message is read. 
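# NOTE(editor): nit on the docstring above -- in AMQP terms nowait=True
# maps to the no-wait flag on basic.consume (do not wait for the
# broker's Consume-Ok); it is the later drain_events() step, not this
# call, that blocks for a message.  The calling pattern this rework is
# aiming at, sketched from the service.py hunk further down (topic
# names illustrative):
#
#     conn = create_connection(new=True)
#     create_consumer(conn, 'compute', proxy, fanout=False)
#     create_consumer(conn, 'compute.host1', proxy, fanout=False)
#     create_consumer(conn, 'compute', proxy, fanout=True)
#     conn.consume_in_thread()  # drain all queues in one greenthread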
@@ -100,7 +101,7 @@ class QueueBase(object): self.queue = None -class DirectQueue(QueueBase): +class DirectConsumer(ConsumerBase): """Queue/consumer class for 'direct'""" def __init__(self, channel, msg_id, callback, tag, **kwargs): @@ -123,7 +124,7 @@ class DirectQueue(QueueBase): type='direct', durable=options['durable'], auto_delete=options['auto_delete']) - super(DirectQueue, self).__init__( + super(DirectConsumer, self).__init__( channel, callback, tag, @@ -133,8 +134,8 @@ class DirectQueue(QueueBase): **options) -class TopicQueue(QueueBase): - """Queue/consumer class for 'topic'""" +class TopicConsumer(ConsumerBase): + """Consumer class for 'topic'""" def __init__(self, channel, topic, callback, tag, **kwargs): """Init a 'topic' queue. @@ -156,7 +157,7 @@ class TopicQueue(QueueBase): type='topic', durable=options['durable'], auto_delete=options['auto_delete']) - super(TopicQueue, self).__init__( + super(TopicConsumer, self).__init__( channel, callback, tag, @@ -166,8 +167,8 @@ class TopicQueue(QueueBase): **options) -class FanoutQueue(QueueBase): - """Queue/consumer class for 'fanout'""" +class FanoutConsumer(ConsumerBase): + """Consumer class for 'fanout'""" def __init__(self, channel, topic, callback, tag, **kwargs): """Init a 'fanout' queue. @@ -193,7 +194,7 @@ class FanoutQueue(QueueBase): type='fanout', durable=options['durable'], auto_delete=options['auto_delete']) - super(FanoutQueue, self).__init__( + super(FanoutConsumer, self).__init__( channel, callback, tag, @@ -286,7 +287,8 @@ class Connection(object): """Connection instance object.""" def __init__(self): - self.queues = [] + self.consumers = [] + self.consumer_thread = None self.max_retries = FLAGS.rabbit_max_retries # Try forever? if self.max_retries <= 0: @@ -334,9 +336,9 @@ class Connection(object): LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d' % self.params)) self.channel = self.connection.channel() - for consumer in self.queues: + for consumer in self.consumers: consumer.reconnect(self.channel) - if self.queues: + if self.consumers: LOG.debug(_("Re-established AMQP queues")) def get_channel(self): @@ -354,30 +356,32 @@ class Connection(object): def close(self): """Close/release this connection""" + self.cancel_consumer_thread() self.connection.release() self.connection = None def reset(self): """Reset a connection so it can be used again""" + self.cancel_consumer_thread() self.channel.close() self.channel = self.connection.channel() - self.queues = [] + self.consumers = [] - def create_queue(self, queue_cls, topic, callback): - """Create a queue using the class that was passed in and - add it to our list of queues used for consuming + def declare_consumer(self, consumer_cls, topic, callback): + """Create a Consumer using the class that was passed in and + add it to our list of consumers """ - queue = queue_cls(self.channel, topic, callback, - self.queue_num.next()) - self.queues.append(queue) - return queue + consumer = consumer_cls(self.channel, topic, callback, + self.consumer_num.next()) + self.consumers.append(consumer) + return consumer - def consume(self, limit=None): - """Consume from all queues""" + def iterconsume(self, limit=None): + """Return an iterator that will consume from all queues/consumers""" while True: try: - queues_head = self.queues[:-1] - queues_tail = self.queues[-1] + queues_head = self.consumers[:-1] + queues_tail = self.consumers[-1] for queue in queues_head: queue.consume(nowait=True) queues_tail.consume(nowait=False) @@ -391,6 +395,36 @@ class Connection(object): '%s' 
% str(e))) self.reconnect() + def consume(self, limit=None): + """Consume from all queues/consumers""" + it = self.iterconsume(limit=limit) + while True: + try: + it.next() + except StopIteration: + return + + def consume_in_thread(self): + """Consumer from all queues/consumers in a greenthread""" + def _consumer_thread(): + try: + self.consume() + except greenlet.GreenletExit: + return + if not self.consumer_thread: + self.consumer_thread = eventlet.spawn(_consumer_thread) + return self.consumer_thread + + def cancel_consumer_thread(self): + """Cancel a consumer thread""" + if self.consumer_thread: + self.consumer_thread.kill() + try: + self.consumer_thread.wait() + except greenlet.GreenletExit: + pass + self.consumer_thread = None + def publisher_send(self, cls, topic, msg): """Send to a publisher based on the publisher class""" while True: @@ -408,20 +442,20 @@ class Connection(object): except self.connection.connection_errors, e: pass - def direct_consumer(self, topic, callback): + def declare_direct_consumer(self, topic, callback): """Create a 'direct' queue. In nova's use, this is generally a msg_id queue used for responses for call/multicall """ - return self.create_queue(DirectQueue, topic, callback) + self.declare_consumer(DirectConsumer, topic, callback) - def topic_consumer(self, topic, callback=None): - """Create a 'topic' queue.""" - return self.create_queue(TopicQueue, topic, callback) + def declare_topic_consumer(self, topic, callback=None): + """Create a 'topic' consumer.""" + self.declare_consumer(TopicConsumer, topic, callback) - def fanout_consumer(self, topic, callback): - """Create a 'fanout' queue""" - return self.create_queue(FanoutQueue, topic, callback) + def declare_fanout_consumer(self, topic, callback): + """Create a 'fanout' consumer""" + self.declare_consumer(FanoutConsumer, topic, callback) def direct_send(self, msg_id, msg): """Send a 'direct' message""" @@ -638,18 +672,9 @@ def create_connection(new=True): def create_consumer(conn, topic, proxy, fanout=False): """Create a consumer that calls a method in a proxy object""" if fanout: - return conn.fanout_consumer(topic, ProxyCallback(proxy)) + conn.declare_fanout_consumer(topic, ProxyCallback(proxy)) else: - return conn.topic_consumer(topic, ProxyCallback(proxy)) - - -def create_consumer_set(conn, consumers): - # FIXME(comstud): Replace this however necessary - # Returns an object that you can call .wait() on to consume - # all queues? - # Needs to have a .close() which will stop consuming? - # Needs to also have an method for tests? 
- raise NotImplemented + conn.declare_topic_consumer(topic, ProxyCallback(proxy)) def multicall(context, topic, msg): @@ -666,7 +691,7 @@ def multicall(context, topic, msg): conn = ConnectionContext() wait_msg = MulticallWaiter(conn) - conn.direct_consumer(msg_id, wait_msg) + conn.declare_direct_consumer(msg_id, wait_msg) conn.topic_send(topic, msg) return wait_msg diff --git a/nova/service.py b/nova/service.py index a872a36ee..ab7925eb3 100644 --- a/nova/service.py +++ b/nova/service.py @@ -153,26 +153,17 @@ class Service(object): self.topic) # Share this same connection for these Consumers - consumer_all = rpc.create_consumer(self.conn, self.topic, self, + rpc.create_consumer(self.conn, self.topic, self, fanout=False) node_topic = '%s.%s' % (self.topic, self.host) - consumer_node = rpc.create_consumer(self.conn, node_topic, self, + rpc.create_consumer(self.conn, node_topic, self, fanout=False) - fanout = rpc.create_consumer(self.conn, self.topic, self, fanout=True) + rpc.create_consumer(self.conn, self.topic, self, fanout=True) - consumers = [consumer_all, consumer_node, fanout] - consumer_set = rpc.create_consumer_set(self.conn, consumers) - - # Wait forever, processing these consumers - def _wait(): - try: - consumer_set.wait() - finally: - consumer_set.close() - - self.consumer_set_thread = eventlet.spawn(_wait) + # Consume from all consumers in a thread + self.conn.consume_in_thread() if self.report_interval: pulse = utils.LoopingCall(self.report_state) @@ -237,11 +228,6 @@ class Service(object): logging.warn(_('Service killed that has no database entry')) def stop(self): - self.consumer_set_thread.kill() - try: - self.consumer_set_thread.wait() - except greenlet.GreenletExit: - pass # Try to shut the connection down, but if we get any sort of # errors, go ahead and ignore them.. 
as we're shutting down anyway try: -- cgit From 32943729861ba4ad562e899a55af77b7974af8db Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sun, 28 Aug 2011 18:17:21 -0700 Subject: fix test_rpc and kombu stuff --- nova/rpc/FIXME | 2 -- nova/rpc/__init__.py | 7 +------ nova/rpc/impl_carrot.py | 56 +++++++++++++++++++++++++++++++++++++++++-------- nova/rpc/impl_kombu.py | 21 ++++++++++++++----- nova/tests/test_rpc.py | 25 +++++++++++++--------- 5 files changed, 79 insertions(+), 32 deletions(-) delete mode 100644 nova/rpc/FIXME diff --git a/nova/rpc/FIXME b/nova/rpc/FIXME deleted file mode 100644 index 704081802..000000000 --- a/nova/rpc/FIXME +++ /dev/null @@ -1,2 +0,0 @@ -Move some code duplication between carrot/kombu into common.py -The other FIXMEs in __init__.py and impl_kombu.py diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index 9371c2ab3..10b69c8b5 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -38,18 +38,13 @@ RPCIMPL = import_object(impl_table.get(FLAGS.rpc_backend, def create_connection(new=True): - return RPCIMPL.Connection.instance(new=True) + return RPCIMPL.create_connection(new=new) def create_consumer(conn, topic, proxy, fanout=False): return RPCIMPL.create_consumer(conn, topic, proxy, fanout) -def create_consumer_set(conn, consumers): - # FIXME(comstud): replace however necessary - return RPCIMPL.ConsumerSet(connection=conn, consumer_list=consumers) - - def call(context, topic, msg): return RPCIMPL.call(context, topic, msg) diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index 40097e10e..efff788a0 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -33,6 +33,7 @@ import uuid from carrot import connection as carrot_connection from carrot import messaging +import eventlet from eventlet import greenpool from eventlet import pools from eventlet import queue @@ -42,10 +43,10 @@ from nova import context from nova import exception from nova import fakerabbit from nova import flags -from nova import log as logging -from nova import utils from nova.rpc.common import RemoteError, LOG +# Needed for tests +eventlet.monkey_patch() FLAGS = flags.FLAGS flags.DEFINE_integer('rpc_thread_pool_size', 1024, @@ -57,6 +58,11 @@ flags.DEFINE_integer('rpc_conn_pool_size', 30, class Connection(carrot_connection.BrokerConnection): """Connection instance object.""" + def __init__(self, *args, **kwargs): + super(Connection, self).__init__(*args, **kwargs) + self._rpc_consumers = [] + self._rpc_consumer_thread = None + @classmethod def instance(cls, new=True): """Returns the instance.""" @@ -94,6 +100,42 @@ class Connection(carrot_connection.BrokerConnection): pass return cls.instance() + def close(self): + self.cancel_consumer_thread() + for consumer in self._rpc_consumers: + try: + consumer.close() + except Exception: + # ignore all errors + pass + self._rpc_consumers = [] + super(Connection, self).close() + + def consume_in_thread(self): + """Consumer from all queues/consumers in a greenthread""" + + consumer_set = ConsumerSet(connection=self, + consumer_list=self._rpc_consumers) + + def _consumer_thread(): + try: + consumer_set.wait() + except greenlet.GreenletExit: + return + if not self._rpc_consumer_thread: + self._rpc_consumer_thread = eventlet.spawn(_consumer_thread) + return self._rpc_consumer_thread + + def cancel_consumer_thread(self): + """Cancel a consumer thread""" + if self._rpc_consumer_thread: + self._rpc_consumer_thread.kill() + try: + self._rpc_consumer_thread.wait() + except greenlet.GreenletExit: + pass + 
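+            # NOTE(editor): clearing the handle is what lets a later
+            # consume_in_thread() call spawn a fresh greenthread once
+            # the old one has been killed and reaped above.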
self._rpc_consumer_thread = None + class Pool(pools.Pool): """Class that implements a Pool of Connections.""" @@ -119,6 +161,7 @@ class Consumer(messaging.Consumer): """ def __init__(self, *args, **kwargs): + connection = kwargs.get('connection') max_retries = FLAGS.rabbit_max_retries sleep_time = FLAGS.rabbit_retry_interval tries = 0 @@ -148,6 +191,7 @@ class Consumer(messaging.Consumer): LOG.error(_('Unable to connect to AMQP server ' 'after %(tries)d tries. Shutting down.') % locals()) sys.exit(1) + connection._rpc_consumers.append(self) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): """Wraps the parent fetch with some logic for failed connection.""" @@ -175,12 +219,6 @@ class Consumer(messaging.Consumer): LOG.exception(_('Failed to fetch message from queue: %s' % e)) self.failed_connection = True - def attach_to_eventlet(self): - """Only needed for unit tests!""" - timer = utils.LoopingCall(self.fetch, enable_callbacks=True) - timer.start(0.1) - return timer - class AdapterConsumer(Consumer): """Calls methods on a proxy object based on method and args.""" @@ -251,7 +289,7 @@ class AdapterConsumer(Consumer): # NOTE(vish): this iterates through the generator list(rval) except Exception as e: - logging.exception('Exception during message handling') + LOG.exception('Exception during message handling') if msg_id: msg_reply(msg_id, None, sys.exc_info()) return diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index 01871606c..bd83bc520 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -14,9 +14,6 @@ # License for the specific language governing permissions and limitations # under the License. -from nova import flags -from nova.rpc.common import RemoteError, LOG - import kombu import kombu.entity import kombu.messaging @@ -24,8 +21,22 @@ import kombu.connection import itertools import sys import time +import traceback +import types import uuid +import eventlet +from eventlet import greenpool +from eventlet import pools +import greenlet + +from nova import context +from nova import exception +from nova import flags +from nova.rpc.common import RemoteError, LOG + +# Needed for tests +eventlet.monkey_patch() FLAGS = flags.FLAGS @@ -317,7 +328,7 @@ class Connection(object): pass time.sleep(1) self.connection = kombu.connection.Connection(**self.params) - self.queue_num = itertools.count(1) + self.consumer_num = itertools.count(1) try: self.connection.ensure_connection(errback=self.connect_error, @@ -634,7 +645,7 @@ class RpcContext(context.RequestContext): class MulticallWaiter(object): def __init__(self, connection): self._connection = connection - self._iterator = connection.consume() + self._iterator = connection.iterconsume() self._result = None self._done = False diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index ba9c0a859..2b9922491 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -33,13 +33,17 @@ class RpcTestCase(test.TestCase): super(RpcTestCase, self).setUp() self.conn = rpc.create_connection(True) self.receiver = TestReceiver() - self.consumer = rpc.create_consumer(self.conn, - 'test', - self.receiver, - False) - self.consumer.attach_to_eventlet() + rpc.create_consumer(self.conn, + 'test', + self.receiver, + False) + self.conn.consume_in_thread() self.context = context.get_admin_context() + def tearDown(self): + self.conn.close() + super(RpcTestCase, self).tearDown() + def test_call_succeed(self): value = 42 result = rpc.call(self.context, 'test', {"method": "echo", @@ -139,16 +143,17 @@ class 
RpcTestCase(test.TestCase): nested = Nested() conn = rpc.create_connection(True) - consumer = rpc.create_consumer(conn, - 'nested', - nested, - False) - consumer.attach_to_eventlet() + rpc.create_consumer(conn, + 'nested', + nested, + False) + conn.consume_in_thread() value = 42 result = rpc.call(self.context, 'nested', {"method": "echo", "args": {"queue": "test", "value": value}}) + conn.close() self.assertEqual(value, result) -- cgit From e5310d666f167efe6e3c9f97176d13801489fdfd Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sun, 28 Aug 2011 18:18:40 -0700 Subject: fix nova-ajax-console-proxy --- bin/nova-ajax-console-proxy | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy index 0a789b4b9..b3205ec56 100755 --- a/bin/nova-ajax-console-proxy +++ b/bin/nova-ajax-console-proxy @@ -113,11 +113,11 @@ class AjaxConsoleProxy(object): AjaxConsoleProxy.tokens[kwargs['token']] = \ {'args': kwargs, 'last_activity': time.time()} - conn = rpc.create_connection(new=True) - consumer = rpc.create_consumer( - conn, - FLAGS.ajax_console_proxy_topic, - TopicProxy) + self.conn = rpc.create_connection(new=True) + rpc.create_consumer( + self.conn, + FLAGS.ajax_console_proxy_topic, + TopicProxy) def delete_expired_tokens(): now = time.time() @@ -129,7 +129,7 @@ class AjaxConsoleProxy(object): for k in to_delete: del AjaxConsoleProxy.tokens[k] - utils.LoopingCall(consumer.fetch, enable_callbacks=True).start(0.1) + self.conn.consume_in_thread() utils.LoopingCall(delete_expired_tokens).start(1) if __name__ == '__main__': @@ -142,3 +142,4 @@ if __name__ == '__main__': server = wsgi.Server("AJAX Console Proxy", acp, port=acp_port) service.serve(server) service.wait() + self.conn.close() -- cgit From da11af2893719677a9113ce391d37b0dada6585c Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sun, 28 Aug 2011 18:19:41 -0700 Subject: fix nova/tests/test_test.py --- nova/tests/test_test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/tests/test_test.py b/nova/tests/test_test.py index 64f11fa45..6075abbb0 100644 --- a/nova/tests/test_test.py +++ b/nova/tests/test_test.py @@ -40,6 +40,6 @@ class IsolationTestCase(test.TestCase): connection = rpc.create_connection(new=True) proxy = NeverCalled() - consumer = rpc.create_consumer(connection, 'compute', - proxy, fanout=False) - consumer.attach_to_eventlet() + rpc.create_consumer(connection, 'compute', + proxy, fanout=False) + connection.consume_in_thread() -- cgit From 6fbb35d596f670d6dcdda2486a12fc09ef9be853 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sun, 28 Aug 2011 19:22:53 -0700 Subject: add carrot/kombu tests... 
small thread fix for kombu --- nova/rpc/__init__.py | 4 +- nova/rpc/common.py | 6 + nova/rpc/impl_carrot.py | 4 - nova/rpc/impl_kombu.py | 12 +- nova/tests/test_rpc_amqp.py | 88 -------------- nova/tests/test_rpc_carrot.py | 202 ++++++++++++++++++++++++++++++++ nova/tests/test_rpc_kombu.py | 266 ++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 481 insertions(+), 101 deletions(-) delete mode 100644 nova/tests/test_rpc_amqp.py create mode 100644 nova/tests/test_rpc_carrot.py create mode 100644 nova/tests/test_rpc_kombu.py diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index 10b69c8b5..2a47ba87b 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -23,7 +23,7 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('rpc_backend', - 'carrot', + 'kombu', "The messaging module to use, defaults to carrot.") impl_table = {'kombu': 'nova.rpc.impl_kombu', @@ -42,7 +42,7 @@ def create_connection(new=True): def create_consumer(conn, topic, proxy, fanout=False): - return RPCIMPL.create_consumer(conn, topic, proxy, fanout) + RPCIMPL.create_consumer(conn, topic, proxy, fanout) def call(context, topic, msg): diff --git a/nova/rpc/common.py b/nova/rpc/common.py index 1d3065a83..b8c280630 100644 --- a/nova/rpc/common.py +++ b/nova/rpc/common.py @@ -1,8 +1,14 @@ from nova import exception +from nova import flags from nova import log as logging LOG = logging.getLogger('nova.rpc') +flags.DEFINE_integer('rpc_thread_pool_size', 1024, + 'Size of RPC thread pool') +flags.DEFINE_integer('rpc_conn_pool_size', 30, + 'Size of RPC connection pool') + class RemoteError(exception.Error): """Signifies that a remote class has raised an exception. diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index efff788a0..07af0a116 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -49,10 +49,6 @@ from nova.rpc.common import RemoteError, LOG eventlet.monkey_patch() FLAGS = flags.FLAGS -flags.DEFINE_integer('rpc_thread_pool_size', 1024, - 'Size of RPC thread pool') -flags.DEFINE_integer('rpc_conn_pool_size', 30, - 'Size of RPC connection pool') class Connection(carrot_connection.BrokerConnection): diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index bd83bc520..49bca1d81 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -40,11 +40,6 @@ eventlet.monkey_patch() FLAGS = flags.FLAGS -flags.DEFINE_integer('rpc_conn_pool_size', 30, - 'Size of RPC connection pool') -flags.DEFINE_integer('rpc_thread_pool_size', 1024, - 'Size of RPC thread pool') - class ConsumerBase(object): """Consumer base class.""" @@ -328,6 +323,9 @@ class Connection(object): pass time.sleep(1) self.connection = kombu.connection.Connection(**self.params) + if FLAGS.fake_rabbit: + # Kludge to speed up tests. 
+ self.connection.transport.polling_interval = 0.0 self.consumer_num = itertools.count(1) try: @@ -422,13 +420,13 @@ class Connection(object): self.consume() except greenlet.GreenletExit: return - if not self.consumer_thread: + if self.consumer_thread is None: self.consumer_thread = eventlet.spawn(_consumer_thread) return self.consumer_thread def cancel_consumer_thread(self): """Cancel a consumer thread""" - if self.consumer_thread: + if self.consumer_thread is not None: self.consumer_thread.kill() try: self.consumer_thread.wait() diff --git a/nova/tests/test_rpc_amqp.py b/nova/tests/test_rpc_amqp.py deleted file mode 100644 index 2215a908b..000000000 --- a/nova/tests/test_rpc_amqp.py +++ /dev/null @@ -1,88 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2010 Openstack, LLC. -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests For RPC AMQP. -""" - -from nova import context -from nova import log as logging -from nova import rpc -from nova.rpc import amqp -from nova import test - - -LOG = logging.getLogger('nova.tests.rpc') - - -class RpcAMQPTestCase(test.TestCase): - def setUp(self): - super(RpcAMQPTestCase, self).setUp() - self.conn = rpc.create_connection(True) - self.receiver = TestReceiver() - self.consumer = rpc.create_consumer(self.conn, - 'test', - self.receiver, - False) - self.consumer.attach_to_eventlet() - self.context = context.get_admin_context() - - def test_connectionpool_single(self): - """Test that ConnectionPool recycles a single connection.""" - conn1 = amqp.ConnectionPool.get() - amqp.ConnectionPool.put(conn1) - conn2 = amqp.ConnectionPool.get() - amqp.ConnectionPool.put(conn2) - self.assertEqual(conn1, conn2) - - -class TestReceiver(object): - """Simple Proxy class so the consumer has methods to call. - - Uses static methods because we aren't actually storing any state. - - """ - - @staticmethod - def echo(context, value): - """Simply returns whatever value is sent in.""" - LOG.debug(_("Received %s"), value) - return value - - @staticmethod - def context(context, value): - """Returns dictionary version of context.""" - LOG.debug(_("Received %s"), context) - return context.to_dict() - - @staticmethod - def echo_three_times(context, value): - context.reply(value) - context.reply(value + 1) - context.reply(value + 2) - - @staticmethod - def echo_three_times_yield(context, value): - yield value - yield value + 1 - yield value + 2 - - @staticmethod - def fail(context, value): - """Raises an exception with the value sent in.""" - raise Exception(value) diff --git a/nova/tests/test_rpc_carrot.py b/nova/tests/test_rpc_carrot.py new file mode 100644 index 000000000..cf84980ab --- /dev/null +++ b/nova/tests/test_rpc_carrot.py @@ -0,0 +1,202 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit Tests for remote procedure calls using queue +""" + +from nova import context +from nova import log as logging +from nova.rpc import impl_carrot as rpc +from nova import test + + +LOG = logging.getLogger('nova.tests.rpc') + + +class RpcCarrotTestCase(test.TestCase): + def setUp(self): + super(RpcCarrotTestCase, self).setUp() + self.conn = rpc.create_connection(True) + self.receiver = TestReceiver() + rpc.create_consumer(self.conn, + 'test', + self.receiver, + False) + self.conn.consume_in_thread() + self.context = context.get_admin_context() + + def tearDown(self): + self.conn.close() + super(RpcCarrotTestCase, self).tearDown() + + def test_connectionpool_single(self): + """Test that ConnectionPool recycles a single connection.""" + conn1 = rpc.ConnectionPool.get() + rpc.ConnectionPool.put(conn1) + conn2 = rpc.ConnectionPool.get() + rpc.ConnectionPool.put(conn2) + self.assertEqual(conn1, conn2) + + def test_call_succeed(self): + value = 42 + result = rpc.call(self.context, 'test', {"method": "echo", + "args": {"value": value}}) + self.assertEqual(value, result) + + def test_call_succeed_despite_multiple_returns(self): + value = 42 + result = rpc.call(self.context, 'test', {"method": "echo_three_times", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_call_succeed_despite_multiple_returns_yield(self): + value = 42 + result = rpc.call(self.context, 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_multicall_succeed_once(self): + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo", + "args": {"value": value}}) + for i, x in enumerate(result): + if i > 0: + self.fail('should only receive one response') + self.assertEqual(value + i, x) + + def test_multicall_succeed_three_times(self): + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_multicall_succeed_three_times_yield(self): + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_context_passed(self): + """Makes sure a context is passed through rpc call.""" + value = 42 + result = rpc.call(self.context, + 'test', {"method": "context", + "args": {"value": value}}) + self.assertEqual(self.context.to_dict(), result) + + def test_call_exception(self): + """Test that exception gets passed back properly. + + rpc.call returns a RemoteError object. The value of the + exception is converted to a string, so we convert it back + to an int in the test. 
+ + """ + value = 42 + self.assertRaises(rpc.RemoteError, + rpc.call, + self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + try: + rpc.call(self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + self.fail("should have thrown rpc.RemoteError") + except rpc.RemoteError as exc: + self.assertEqual(int(exc.value), value) + + def test_nested_calls(self): + """Test that we can do an rpc.call inside another call.""" + class Nested(object): + @staticmethod + def echo(context, queue, value): + """Calls echo in the passed queue""" + LOG.debug(_("Nested received %(queue)s, %(value)s") + % locals()) + # TODO: so, it will replay the context and use the same REQID? + # that's bizarre. + ret = rpc.call(context, + queue, + {"method": "echo", + "args": {"value": value}}) + LOG.debug(_("Nested return %s"), ret) + return value + + nested = Nested() + conn = rpc.create_connection(True) + rpc.create_consumer(conn, + 'nested', + nested, + False) + conn.consume_in_thread() + value = 42 + result = rpc.call(self.context, + 'nested', {"method": "echo", + "args": {"queue": "test", + "value": value}}) + conn.close() + self.assertEqual(value, result) + + +class TestReceiver(object): + """Simple Proxy class so the consumer has methods to call. + + Uses static methods because we aren't actually storing any state. + + """ + + @staticmethod + def echo(context, value): + """Simply returns whatever value is sent in.""" + LOG.debug(_("Received %s"), value) + return value + + @staticmethod + def context(context, value): + """Returns dictionary version of context.""" + LOG.debug(_("Received %s"), context) + return context.to_dict() + + @staticmethod + def echo_three_times(context, value): + context.reply(value) + context.reply(value + 1) + context.reply(value + 2) + + @staticmethod + def echo_three_times_yield(context, value): + yield value + yield value + 1 + yield value + 2 + + @staticmethod + def fail(context, value): + """Raises an exception with the value sent in.""" + raise Exception(value) diff --git a/nova/tests/test_rpc_kombu.py b/nova/tests/test_rpc_kombu.py new file mode 100644 index 000000000..457dfdeca --- /dev/null +++ b/nova/tests/test_rpc_kombu.py @@ -0,0 +1,266 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +Unit Tests for remote procedure calls using queue +""" + +from nova import context +from nova import log as logging +from nova.rpc import impl_kombu as rpc +from nova import test + + +LOG = logging.getLogger('nova.tests.rpc') + + +class RpcKombuTestCase(test.TestCase): + def setUp(self): + super(RpcKombuTestCase, self).setUp() + self.conn = rpc.create_connection() + self.receiver = TestReceiver() + rpc.create_consumer(self.conn, + 'test', + self.receiver, + False) + self.conn.consume_in_thread() + self.context = context.get_admin_context() + + def tearDown(self): + self.conn.close() + super(RpcKombuTestCase, self).tearDown() + + def test_reusing_connection(self): + """Test that reusing a connection returns same one.""" + conn_context = rpc.create_connection(new=False) + conn1 = conn_context.connection + conn_context.close() + conn_context = rpc.create_connection(new=False) + conn2 = conn_context.connection + conn_context.close() + self.assertEqual(conn1, conn2) + + def test_topic_send_receive(self): + """Test sending to a topic exchange/queue""" + + conn = rpc.create_connection() + message = 'topic test message' + + self.received_message = None + + def _callback(message): + self.received_message = message + + conn.declare_topic_consumer('a_topic', _callback) + conn.topic_send('a_topic', message) + conn.consume(limit=1) + conn.close() + + self.assertEqual(self.received_message, message) + + def test_direct_send_receive(self): + """Test sending to a direct exchange/queue""" + conn = rpc.create_connection() + message = 'direct test message' + + self.received_message = None + + def _callback(message): + self.received_message = message + + conn.declare_direct_consumer('a_direct', _callback) + conn.direct_send('a_direct', message) + conn.consume(limit=1) + conn.close() + + self.assertEqual(self.received_message, message) + + @test.skip_test("kombu memory transport seems buggy with fanout queues " + "as this test passes when you use rabbit (fake_rabbit=False)") + def test_fanout_send_receive(self): + """Test sending to a fanout exchange and consuming from 2 queues""" + + conn = rpc.create_connection() + conn2 = rpc.create_connection() + message = 'fanout test message' + + self.received_message = None + + def _callback(message): + self.received_message = message + + conn.declare_fanout_consumer('a_fanout', _callback) + conn2.declare_fanout_consumer('a_fanout', _callback) + conn.fanout_send('a_fanout', message) + + conn.consume(limit=1) + conn.close() + self.assertEqual(self.received_message, message) + + self.received_message = None + conn2.consume(limit=1) + conn2.close() + self.assertEqual(self.received_message, message) + + def test_call_succeed(self): + value = 42 + result = rpc.call(self.context, 'test', {"method": "echo", + "args": {"value": value}}) + self.assertEqual(value, result) + + def test_call_succeed_despite_multiple_returns(self): + value = 42 + result = rpc.call(self.context, 'test', {"method": "echo_three_times", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_call_succeed_despite_multiple_returns_yield(self): + value = 42 + result = rpc.call(self.context, 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_multicall_succeed_once(self): + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo", + "args": {"value": value}}) + for i, x in enumerate(result): + if i > 0: + self.fail('should only receive one response') + self.assertEqual(value + i, 
x) + + def test_multicall_succeed_three_times(self): + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_multicall_succeed_three_times_yield(self): + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_context_passed(self): + """Makes sure a context is passed through rpc call.""" + value = 42 + result = rpc.call(self.context, + 'test', {"method": "context", + "args": {"value": value}}) + self.assertEqual(self.context.to_dict(), result) + + def test_call_exception(self): + """Test that exception gets passed back properly. + + rpc.call returns a RemoteError object. The value of the + exception is converted to a string, so we convert it back + to an int in the test. + + """ + value = 42 + self.assertRaises(rpc.RemoteError, + rpc.call, + self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + try: + rpc.call(self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + self.fail("should have thrown rpc.RemoteError") + except rpc.RemoteError as exc: + self.assertEqual(int(exc.value), value) + + def test_nested_calls(self): + """Test that we can do an rpc.call inside another call.""" + class Nested(object): + @staticmethod + def echo(context, queue, value): + """Calls echo in the passed queue""" + LOG.debug(_("Nested received %(queue)s, %(value)s") + % locals()) + # TODO: so, it will replay the context and use the same REQID? + # that's bizarre. + ret = rpc.call(context, + queue, + {"method": "echo", + "args": {"value": value}}) + LOG.debug(_("Nested return %s"), ret) + return value + + nested = Nested() + conn = rpc.create_connection(True) + rpc.create_consumer(conn, + 'nested', + nested, + False) + conn.consume_in_thread() + value = 42 + result = rpc.call(self.context, + 'nested', {"method": "echo", + "args": {"queue": "test", + "value": value}}) + conn.close() + self.assertEqual(value, result) + + +class TestReceiver(object): + """Simple Proxy class so the consumer has methods to call. + + Uses static methods because we aren't actually storing any state. 
+ + """ + + @staticmethod + def echo(context, value): + """Simply returns whatever value is sent in.""" + LOG.debug(_("Received %s"), value) + return value + + @staticmethod + def context(context, value): + """Returns dictionary version of context.""" + LOG.debug(_("Received %s"), context) + return context.to_dict() + + @staticmethod + def echo_three_times(context, value): + context.reply(value) + context.reply(value + 1) + context.reply(value + 2) + + @staticmethod + def echo_three_times_yield(context, value): + yield value + yield value + 1 + yield value + 2 + + @staticmethod + def fail(context, value): + """Raises an exception with the value sent in.""" + raise Exception(value) -- cgit From 4c2e9ae35b22e7ef2e3fdd20ed72bac115510ada Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sun, 28 Aug 2011 19:23:31 -0700 Subject: carrot consumer thread fix --- nova/rpc/impl_carrot.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index 07af0a116..d0e6f8269 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -118,13 +118,13 @@ class Connection(carrot_connection.BrokerConnection): consumer_set.wait() except greenlet.GreenletExit: return - if not self._rpc_consumer_thread: + if self._rpc_consumer_thread is None: self._rpc_consumer_thread = eventlet.spawn(_consumer_thread) return self._rpc_consumer_thread def cancel_consumer_thread(self): """Cancel a consumer thread""" - if self._rpc_consumer_thread: + if self._rpc_consumer_thread is not None: self._rpc_consumer_thread.kill() try: self._rpc_consumer_thread.wait() -- cgit From 53f796e0cfcec9d5c56dca86ee3c185625917dca Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sun, 28 Aug 2011 19:27:49 -0700 Subject: remove unused rpc connections in test_cloud and test_adminapi --- nova/tests/test_adminapi.py | 2 -- nova/tests/test_cloud.py | 2 -- 2 files changed, 4 deletions(-) diff --git a/nova/tests/test_adminapi.py b/nova/tests/test_adminapi.py index 06cc498ac..aaa633adc 100644 --- a/nova/tests/test_adminapi.py +++ b/nova/tests/test_adminapi.py @@ -38,8 +38,6 @@ class AdminApiTestCase(test.TestCase): super(AdminApiTestCase, self).setUp() self.flags(connection_type='fake') - self.conn = rpc.create_connection() - # set up our cloud self.api = admin.AdminController() diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 0793784f8..14ab64f33 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -51,8 +51,6 @@ class CloudTestCase(test.TestCase): self.flags(connection_type='fake', stub_network=True) - self.conn = rpc.create_connection() - # set up our cloud self.cloud = cloud.CloudController() -- cgit From 25cd526a72a98f184ed57fc85e7be2997305ce31 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 29 Aug 2011 00:12:30 -0700 Subject: pep8 fixes --- nova/flags.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index e09b4721a..774da4ab4 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -302,9 +302,12 @@ DEFINE_bool('rabbit_use_ssl', False, 'connect over SSL') DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') DEFINE_string('rabbit_password', 'guest', 'rabbit password') DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') -DEFINE_integer('rabbit_retry_interval', 1, 'rabbit connection retry interval to start') -DEFINE_integer('rabbit_retry_backoff', 2, 'rabbit connection retry backoff in seconds') -DEFINE_integer('rabbit_max_retries', 0, 'maximum rabbit connection 
attempts (0=try forever)') +DEFINE_integer('rabbit_retry_interval', 1, + 'rabbit connection retry interval to start') +DEFINE_integer('rabbit_retry_backoff', 2, + 'rabbit connection retry backoff in seconds') +DEFINE_integer('rabbit_max_retries', 0, + 'maximum rabbit connection attempts (0=try forever)') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') DEFINE_boolean('rabbit_durable_queues', False, 'use durable queues') DEFINE_list('enabled_apis', ['ec2', 'osapi'], -- cgit From 39ca3df042bd3fa9a8ae2bf97d9383be7360d900 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Mon, 29 Aug 2011 09:45:00 -0400 Subject: Increased migration number. --- .../versions/043_update_instance_states.py | 138 --------------------- .../versions/044_update_instance_states.py | 138 +++++++++++++++++++++ 2 files changed, 138 insertions(+), 138 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py deleted file mode 100644 index e58ae5362..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py +++ /dev/null @@ -1,138 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import sqlalchemy -from sqlalchemy import MetaData, Table, Column, String - -from nova.compute import task_states -from nova.compute import vm_states - - -meta = MetaData() - - -c_task_state = Column('task_state', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - - -_upgrade_translations = { - "stopping": { - "state_description": vm_states.ACTIVE, - "task_state": task_states.STOPPING, - }, - "stopped": { - "state_description": vm_states.STOPPED, - "task_state": None, - }, - "terminated": { - "state_description": vm_states.DELETED, - "task_state": None, - }, - "terminating": { - "state_description": vm_states.ACTIVE, - "task_state": task_states.DELETING, - }, - "running": { - "state_description": vm_states.ACTIVE, - "task_state": None, - }, - "scheduling": { - "state_description": vm_states.BUILDING, - "task_state": task_states.SCHEDULING, - }, - "migrating": { - "state_description": vm_states.MIGRATING, - "task_state": None, - }, - "pending": { - "state_description": vm_states.BUILDING, - "task_state": task_states.SCHEDULING, - }, -} - - -_downgrade_translations = { - vm_states.ACTIVE: { - None: "running", - task_states.DELETING: "terminating", - task_states.STOPPING: "stopping", - }, - vm_states.BUILDING: { - None: "pending", - task_states.SCHEDULING: "scheduling", - }, - vm_states.STOPPED: { - None: "stopped", - }, - vm_states.REBUILDING: { - None: "pending", - }, - vm_states.DELETED: { - None: "terminated", - }, - vm_states.MIGRATING: { - None: "migrating", - }, -} - - -def upgrade(migrate_engine): - meta.bind = migrate_engine - - instance_table = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - c_state = instance_table.c.state - c_state.alter(name='power_state') - - c_vm_state = instance_table.c.state_description - c_vm_state.alter(name='vm_state') - - instance_table.create_column(c_task_state) - - for old_state, values in _upgrade_translations.iteritems(): - instance_table.update().\ - values(**values).\ - where(c_vm_state == old_state).\ - execute() - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - - instance_table = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - c_task_state = instance_table.c.task_state - - c_state = instance_table.c.power_state - c_state.alter(name='state') - - c_vm_state = instance_table.c.vm_state - c_vm_state.alter(name='state_description') - - for old_vm_state, old_task_states in _downgrade_translations.iteritems(): - for old_task_state, new_state_desc in old_task_states.iteritems(): - instance_table.update().\ - where(c_task_state == old_task_state).\ - where(c_vm_state == old_vm_state).\ - values(vm_state=new_state_desc).\ - execute() - - instance_table.drop_column('task_state') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py new file mode 100644 index 000000000..e58ae5362 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py @@ -0,0 +1,138 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sqlalchemy +from sqlalchemy import MetaData, Table, Column, String + +from nova.compute import task_states +from nova.compute import vm_states + + +meta = MetaData() + + +c_task_state = Column('task_state', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + +_upgrade_translations = { + "stopping": { + "state_description": vm_states.ACTIVE, + "task_state": task_states.STOPPING, + }, + "stopped": { + "state_description": vm_states.STOPPED, + "task_state": None, + }, + "terminated": { + "state_description": vm_states.DELETED, + "task_state": None, + }, + "terminating": { + "state_description": vm_states.ACTIVE, + "task_state": task_states.DELETING, + }, + "running": { + "state_description": vm_states.ACTIVE, + "task_state": None, + }, + "scheduling": { + "state_description": vm_states.BUILDING, + "task_state": task_states.SCHEDULING, + }, + "migrating": { + "state_description": vm_states.MIGRATING, + "task_state": None, + }, + "pending": { + "state_description": vm_states.BUILDING, + "task_state": task_states.SCHEDULING, + }, +} + + +_downgrade_translations = { + vm_states.ACTIVE: { + None: "running", + task_states.DELETING: "terminating", + task_states.STOPPING: "stopping", + }, + vm_states.BUILDING: { + None: "pending", + task_states.SCHEDULING: "scheduling", + }, + vm_states.STOPPED: { + None: "stopped", + }, + vm_states.REBUILDING: { + None: "pending", + }, + vm_states.DELETED: { + None: "terminated", + }, + vm_states.MIGRATING: { + None: "migrating", + }, +} + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + instance_table = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_state = instance_table.c.state + c_state.alter(name='power_state') + + c_vm_state = instance_table.c.state_description + c_vm_state.alter(name='vm_state') + + instance_table.create_column(c_task_state) + + for old_state, values in _upgrade_translations.iteritems(): + instance_table.update().\ + values(**values).\ + where(c_vm_state == old_state).\ + execute() + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + instance_table = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_task_state = instance_table.c.task_state + + c_state = instance_table.c.power_state + c_state.alter(name='state') + + c_vm_state = instance_table.c.vm_state + c_vm_state.alter(name='state_description') + + for old_vm_state, old_task_states in _downgrade_translations.iteritems(): + for old_task_state, new_state_desc in old_task_states.iteritems(): + instance_table.update().\ + where(c_task_state == old_task_state).\ + where(c_vm_state == old_vm_state).\ + values(vm_state=new_state_desc).\ + execute() + + instance_table.drop_column('task_state') -- cgit From 0972d9188b0b73fa357f75896ab3bebda9a2a9de Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Mon, 29 Aug 2011 10:13:39 -0400 Subject: Removed test_parallel_builds --- nova/tests/test_xenapi.py | 37 ------------------------------------- 1 file changed, 37 deletions(-) diff --git a/nova/tests/test_xenapi.py 
b/nova/tests/test_xenapi.py index 2f0559366..45dad3516 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -16,7 +16,6 @@ """Test suite for XenAPI.""" -import eventlet import functools import json import os @@ -203,42 +202,6 @@ class XenAPIVMTestCase(test.TestCase): self.context = context.RequestContext(self.user_id, self.project_id) self.conn = xenapi_conn.get_connection(False) - def test_parallel_builds(self): - stubs.stubout_loopingcall_delay(self.stubs) - - def _do_build(id, proj, user, *args): - values = { - 'id': id, - 'project_id': proj, - 'user_id': user, - 'image_ref': 1, - 'kernel_id': 2, - 'ramdisk_id': 3, - 'instance_type_id': '3', # m1.large - 'os_type': 'linux', - 'architecture': 'x86-64'} - network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False}, - {'broadcast': '192.168.0.255', - 'dns': ['192.168.0.1'], - 'gateway': '192.168.0.1', - 'gateway6': 'dead:beef::1', - 'ip6s': [{'enabled': '1', - 'ip': 'dead:beef::dcad:beff:feef:0', - 'netmask': '64'}], - 'ips': [{'enabled': '1', - 'ip': '192.168.0.100', - 'netmask': '255.255.255.0'}], - 'label': 'fake', - 'mac': 'DE:AD:BE:EF:00:00', - 'rxtx_cap': 3})] - instance = db.instance_create(self.context, values) - self.conn.spawn(self.context, instance, network_info) - - gt1 = eventlet.spawn(_do_build, 1, self.project_id, self.user_id) - gt2 = eventlet.spawn(_do_build, 2, self.project_id, self.user_id) - gt1.wait() - gt2.wait() - def test_list_instances_0(self): instances = self.conn.list_instances() self.assertEquals(instances, []) -- cgit From 024b76a8df5c96d37dea0a05f66dfe4628a64a28 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 29 Aug 2011 10:27:25 -0700 Subject: more logging info to help identify bad payloads --- nova/notifier/api.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/notifier/api.py b/nova/notifier/api.py index 6ef4a050e..043838536 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -122,4 +122,5 @@ def notify(publisher_id, event_type, priority, payload): driver.notify(msg) except Exception, e: LOG.exception(_("Problem '%(e)s' attempting to " - "send to notification system." % locals())) + "send to notification system. Payload=%(payload)s" % + locals())) -- cgit From ee15f2a58217d522e23d811db4958e2e9b2338d6 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 29 Aug 2011 14:36:12 -0700 Subject: ditched rpc.create_consumer(conn) interface... instead you now do conn.create_consumer(.. 
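A minimal before/after sketch of the interface change this subject describes, using only the call shapes that appear in the diffs below (nova/service.py and the rpc tests); `topic` and `proxy` stand in for a real topic string and handler object:

    # old interface: a module-level helper that took the connection
    conn = rpc.create_connection(new=True)
    rpc.create_consumer(conn, topic, proxy, fanout=False)
    conn.consume_in_thread()

    # new interface: consumers are created on the connection itself
    conn = rpc.create_connection(new=True)
    conn.create_consumer(topic, proxy, fanout=False)
    conn.consume_in_thread()
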
--- nova/rpc/__init__.py | 6 +---- nova/rpc/impl_carrot.py | 29 +++++++++++------------ nova/rpc/impl_kombu.py | 55 +++++++++++++++++++++---------------------- nova/service.py | 8 +++---- nova/tests/test_rpc.py | 10 ++------ nova/tests/test_rpc_carrot.py | 10 ++------ nova/tests/test_rpc_kombu.py | 10 ++------ nova/tests/test_test.py | 3 +-- 8 files changed, 52 insertions(+), 79 deletions(-) diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index 2a47ba87b..fe50fb476 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -24,7 +24,7 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('rpc_backend', 'kombu', - "The messaging module to use, defaults to carrot.") + "The messaging module to use, defaults to kombu.") impl_table = {'kombu': 'nova.rpc.impl_kombu', 'amqp': 'nova.rpc.impl_kombu', @@ -41,10 +41,6 @@ def create_connection(new=True): return RPCIMPL.create_connection(new=new) -def create_consumer(conn, topic, proxy, fanout=False): - RPCIMPL.create_consumer(conn, topic, proxy, fanout) - - def call(context, topic, msg): return RPCIMPL.call(context, topic, msg) diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index d0e6f8269..6d504aaec 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -132,6 +132,20 @@ class Connection(carrot_connection.BrokerConnection): pass self._rpc_consumer_thread = None + def create_consumer(self, topic, proxy, fanout=False): + """Create a consumer that calls methods in the proxy""" + if fanout: + consumer = FanoutAdapterConsumer( + connection=self, + topic=topic, + proxy=proxy) + else: + consumer = TopicAdapterConsumer( + connection=self, + topic=topic, + proxy=proxy) + self._rpc_consumers.append(consumer) + class Pool(pools.Pool): """Class that implements a Pool of Connections.""" @@ -187,7 +201,6 @@ class Consumer(messaging.Consumer): LOG.error(_('Unable to connect to AMQP server ' 'after %(tries)d tries. 
Shutting down.') % locals()) sys.exit(1) - connection._rpc_consumers.append(self) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): """Wraps the parent fetch with some logic for failed connection.""" diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index 49bca1d81..83ee1b122 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -404,26 +404,6 @@ class Connection(object): '%s' % str(e))) self.reconnect() - def consume(self, limit=None): - """Consume from all queues/consumers""" - it = self.iterconsume(limit=limit) - while True: - try: - it.next() - except StopIteration: - return - - def consume_in_thread(self): - """Consumer from all queues/consumers in a greenthread""" - def _consumer_thread(): - try: - self.consume() - except greenlet.GreenletExit: - return - if self.consumer_thread is None: - self.consumer_thread = eventlet.spawn(_consumer_thread) - return self.consumer_thread - def cancel_consumer_thread(self): """Cancel a consumer thread""" if self.consumer_thread is not None: @@ -478,6 +458,33 @@ class Connection(object): """Send a 'fanout' message""" self.publisher_send(FanoutPublisher, topic, msg) + def consume(self, limit=None): + """Consume from all queues/consumers""" + it = self.iterconsume(limit=limit) + while True: + try: + it.next() + except StopIteration: + return + + def consume_in_thread(self): + """Consume from all queues/consumers in a greenthread""" + def _consumer_thread(): + try: + self.consume() + except greenlet.GreenletExit: + return + if self.consumer_thread is None: + self.consumer_thread = eventlet.spawn(_consumer_thread) + return self.consumer_thread + + def create_consumer(self, topic, proxy, fanout=False): + """Create a consumer that calls a method in a proxy object""" + if fanout: + self.declare_fanout_consumer(topic, ProxyCallback(proxy)) + else: + self.declare_topic_consumer(topic, ProxyCallback(proxy)) + class Pool(pools.Pool): """Class that implements a Pool of Connections.""" @@ -678,14 +685,6 @@ def create_connection(new=True): return ConnectionContext(pooled=not new) -def create_consumer(conn, topic, proxy, fanout=False): - """Create a consumer that calls a method in a proxy object""" - if fanout: - conn.declare_fanout_consumer(topic, ProxyCallback(proxy)) - else: - conn.declare_topic_consumer(topic, ProxyCallback(proxy)) - def multicall(context, topic, msg): """Make a call that returns multiple times.""" # Can't use 'with' for multicall, as it returns an iterator diff --git a/nova/service.py b/nova/service.py index ab7925eb3..247eb4fb1 100644 --- a/nova/service.py +++ b/nova/service.py @@ -153,14 +153,12 @@ class Service(object): self.topic) # Share this same connection for these Consumers - rpc.create_consumer(self.conn, self.topic, self, - fanout=False) + self.conn.create_consumer(self.topic, self, fanout=False) node_topic = '%s.%s' % (self.topic, self.host) - rpc.create_consumer(self.conn, node_topic, self, - fanout=False) + self.conn.create_consumer(node_topic, self, fanout=False) - 
rpc.create_consumer(self.conn, self.topic, self, fanout=True) + self.conn.create_consumer(self.topic, self, fanout=True) # Consume from all consumers in a thread self.conn.consume_in_thread() diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 2b9922491..ba91ea3b2 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -33,10 +33,7 @@ class RpcTestCase(test.TestCase): super(RpcTestCase, self).setUp() self.conn = rpc.create_connection(True) self.receiver = TestReceiver() - rpc.create_consumer(self.conn, - 'test', - self.receiver, - False) + self.conn.create_consumer('test', self.receiver, False) self.conn.consume_in_thread() self.context = context.get_admin_context() @@ -143,10 +140,7 @@ class RpcTestCase(test.TestCase): nested = Nested() conn = rpc.create_connection(True) - rpc.create_consumer(conn, - 'nested', - nested, - False) + conn.create_consumer('nested', nested, False) conn.consume_in_thread() value = 42 result = rpc.call(self.context, diff --git a/nova/tests/test_rpc_carrot.py b/nova/tests/test_rpc_carrot.py index cf84980ab..ff704ecf8 100644 --- a/nova/tests/test_rpc_carrot.py +++ b/nova/tests/test_rpc_carrot.py @@ -33,10 +33,7 @@ class RpcCarrotTestCase(test.TestCase): super(RpcCarrotTestCase, self).setUp() self.conn = rpc.create_connection(True) self.receiver = TestReceiver() - rpc.create_consumer(self.conn, - 'test', - self.receiver, - False) + self.conn.create_consumer('test', self.receiver, False) self.conn.consume_in_thread() self.context = context.get_admin_context() @@ -151,10 +148,7 @@ class RpcCarrotTestCase(test.TestCase): nested = Nested() conn = rpc.create_connection(True) - rpc.create_consumer(conn, - 'nested', - nested, - False) + conn.create_consumer('nested', nested, False) conn.consume_in_thread() value = 42 result = rpc.call(self.context, diff --git a/nova/tests/test_rpc_kombu.py b/nova/tests/test_rpc_kombu.py index 457dfdeca..7db88ecd0 100644 --- a/nova/tests/test_rpc_kombu.py +++ b/nova/tests/test_rpc_kombu.py @@ -33,10 +33,7 @@ class RpcKombuTestCase(test.TestCase): super(RpcKombuTestCase, self).setUp() self.conn = rpc.create_connection() self.receiver = TestReceiver() - rpc.create_consumer(self.conn, - 'test', - self.receiver, - False) + self.conn.create_consumer('test', self.receiver, False) self.conn.consume_in_thread() self.context = context.get_admin_context() @@ -215,10 +212,7 @@ class RpcKombuTestCase(test.TestCase): nested = Nested() conn = rpc.create_connection(True) - rpc.create_consumer(conn, - 'nested', - nested, - False) + conn.create_consumer('nested', nested, False) conn.consume_in_thread() value = 42 result = rpc.call(self.context, diff --git a/nova/tests/test_test.py b/nova/tests/test_test.py index 6075abbb0..3482ff6a0 100644 --- a/nova/tests/test_test.py +++ b/nova/tests/test_test.py @@ -40,6 +40,5 @@ class IsolationTestCase(test.TestCase): connection = rpc.create_connection(new=True) proxy = NeverCalled() - rpc.create_consumer(connection, 'compute', - proxy, fanout=False) + connection.create_consumer('compute', proxy, fanout=False) connection.consume_in_thread() -- cgit From a16efa7b94a15040657b961b0fd29a4d2720ef21 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 29 Aug 2011 14:54:20 -0700 Subject: created nova/tests/test_rpc_common.py which contains a rpc test base class so we can share tests between the rpc implementations --- nova/tests/test_rpc.py | 157 +---------------------------------- nova/tests/test_rpc_carrot.py | 169 ++----------------------------------- nova/tests/test_rpc_common.py | 188 
++++++++++++++++++++++++++++++++++++++++++ nova/tests/test_rpc_kombu.py | 172 +++----------------------------------- 4 files changed, 211 insertions(+), 475 deletions(-) create mode 100644 nova/tests/test_rpc_common.py diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index ba91ea3b2..6b4454747 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -22,167 +22,16 @@ Unit Tests for remote procedure calls using queue from nova import context from nova import log as logging from nova import rpc -from nova import test +from nova.tests import test_rpc_common LOG = logging.getLogger('nova.tests.rpc') -class RpcTestCase(test.TestCase): +class RpcTestCase(test_rpc_common._BaseRpcTestCase): def setUp(self): + self.rpc = rpc super(RpcTestCase, self).setUp() - self.conn = rpc.create_connection(True) - self.receiver = TestReceiver() - self.conn.create_consumer('test', self.receiver, False) - self.conn.consume_in_thread() - self.context = context.get_admin_context() def tearDown(self): - self.conn.close() super(RpcTestCase, self).tearDown() - - def test_call_succeed(self): - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo", - "args": {"value": value}}) - self.assertEqual(value, result) - - def test_call_succeed_despite_multiple_returns(self): - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo_three_times", - "args": {"value": value}}) - self.assertEqual(value + 2, result) - - def test_call_succeed_despite_multiple_returns_yield(self): - value = 42 - result = rpc.call(self.context, 'test', - {"method": "echo_three_times_yield", - "args": {"value": value}}) - self.assertEqual(value + 2, result) - - def test_multicall_succeed_once(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo", - "args": {"value": value}}) - for i, x in enumerate(result): - if i > 0: - self.fail('should only receive one response') - self.assertEqual(value + i, x) - - def test_multicall_succeed_three_times(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo_three_times", - "args": {"value": value}}) - for i, x in enumerate(result): - self.assertEqual(value + i, x) - - def test_multicall_succeed_three_times_yield(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo_three_times_yield", - "args": {"value": value}}) - for i, x in enumerate(result): - self.assertEqual(value + i, x) - - def test_context_passed(self): - """Makes sure a context is passed through rpc call.""" - value = 42 - result = rpc.call(self.context, - 'test', {"method": "context", - "args": {"value": value}}) - self.assertEqual(self.context.to_dict(), result) - - def test_call_exception(self): - """Test that exception gets passed back properly. - - rpc.call returns a RemoteError object. The value of the - exception is converted to a string, so we convert it back - to an int in the test. 
- - """ - value = 42 - self.assertRaises(rpc.RemoteError, - rpc.call, - self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - try: - rpc.call(self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - self.fail("should have thrown rpc.RemoteError") - except rpc.RemoteError as exc: - self.assertEqual(int(exc.value), value) - - def test_nested_calls(self): - """Test that we can do an rpc.call inside another call.""" - class Nested(object): - @staticmethod - def echo(context, queue, value): - """Calls echo in the passed queue""" - LOG.debug(_("Nested received %(queue)s, %(value)s") - % locals()) - # TODO: so, it will replay the context and use the same REQID? - # that's bizarre. - ret = rpc.call(context, - queue, - {"method": "echo", - "args": {"value": value}}) - LOG.debug(_("Nested return %s"), ret) - return value - - nested = Nested() - conn = rpc.create_connection(True) - conn.create_consumer('nested', nested, False) - conn.consume_in_thread() - value = 42 - result = rpc.call(self.context, - 'nested', {"method": "echo", - "args": {"queue": "test", - "value": value}}) - conn.close() - self.assertEqual(value, result) - - -class TestReceiver(object): - """Simple Proxy class so the consumer has methods to call. - - Uses static methods because we aren't actually storing any state. - - """ - - @staticmethod - def echo(context, value): - """Simply returns whatever value is sent in.""" - LOG.debug(_("Received %s"), value) - return value - - @staticmethod - def context(context, value): - """Returns dictionary version of context.""" - LOG.debug(_("Received %s"), context) - return context.to_dict() - - @staticmethod - def echo_three_times(context, value): - context.reply(value) - context.reply(value + 1) - context.reply(value + 2) - - @staticmethod - def echo_three_times_yield(context, value): - yield value - yield value + 1 - yield value + 2 - - @staticmethod - def fail(context, value): - """Raises an exception with the value sent in.""" - raise Exception(value) diff --git a/nova/tests/test_rpc_carrot.py b/nova/tests/test_rpc_carrot.py index ff704ecf8..57cdebf4f 100644 --- a/nova/tests/test_rpc_carrot.py +++ b/nova/tests/test_rpc_carrot.py @@ -16,181 +16,30 @@ # License for the specific language governing permissions and limitations # under the License. 
""" -Unit Tests for remote procedure calls using queue +Unit Tests for remote procedure calls using carrot """ from nova import context from nova import log as logging -from nova.rpc import impl_carrot as rpc -from nova import test +from nova.rpc import impl_carrot +from nova.tests import test_rpc_common LOG = logging.getLogger('nova.tests.rpc') -class RpcCarrotTestCase(test.TestCase): +class RpcCarrotTestCase(test_rpc_common._BaseRpcTestCase): def setUp(self): + self.rpc = impl_carrot super(RpcCarrotTestCase, self).setUp() - self.conn = rpc.create_connection(True) - self.receiver = TestReceiver() - self.conn.create_consumer('test', self.receiver, False) - self.conn.consume_in_thread() - self.context = context.get_admin_context() def tearDown(self): - self.conn.close() super(RpcCarrotTestCase, self).tearDown() def test_connectionpool_single(self): """Test that ConnectionPool recycles a single connection.""" - conn1 = rpc.ConnectionPool.get() - rpc.ConnectionPool.put(conn1) - conn2 = rpc.ConnectionPool.get() - rpc.ConnectionPool.put(conn2) + conn1 = self.rpc.ConnectionPool.get() + self.rpc.ConnectionPool.put(conn1) + conn2 = self.rpc.ConnectionPool.get() + self.rpc.ConnectionPool.put(conn2) self.assertEqual(conn1, conn2) - - def test_call_succeed(self): - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo", - "args": {"value": value}}) - self.assertEqual(value, result) - - def test_call_succeed_despite_multiple_returns(self): - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo_three_times", - "args": {"value": value}}) - self.assertEqual(value + 2, result) - - def test_call_succeed_despite_multiple_returns_yield(self): - value = 42 - result = rpc.call(self.context, 'test', - {"method": "echo_three_times_yield", - "args": {"value": value}}) - self.assertEqual(value + 2, result) - - def test_multicall_succeed_once(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo", - "args": {"value": value}}) - for i, x in enumerate(result): - if i > 0: - self.fail('should only receive one response') - self.assertEqual(value + i, x) - - def test_multicall_succeed_three_times(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo_three_times", - "args": {"value": value}}) - for i, x in enumerate(result): - self.assertEqual(value + i, x) - - def test_multicall_succeed_three_times_yield(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo_three_times_yield", - "args": {"value": value}}) - for i, x in enumerate(result): - self.assertEqual(value + i, x) - - def test_context_passed(self): - """Makes sure a context is passed through rpc call.""" - value = 42 - result = rpc.call(self.context, - 'test', {"method": "context", - "args": {"value": value}}) - self.assertEqual(self.context.to_dict(), result) - - def test_call_exception(self): - """Test that exception gets passed back properly. - - rpc.call returns a RemoteError object. The value of the - exception is converted to a string, so we convert it back - to an int in the test. 
- - """ - value = 42 - self.assertRaises(rpc.RemoteError, - rpc.call, - self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - try: - rpc.call(self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - self.fail("should have thrown rpc.RemoteError") - except rpc.RemoteError as exc: - self.assertEqual(int(exc.value), value) - - def test_nested_calls(self): - """Test that we can do an rpc.call inside another call.""" - class Nested(object): - @staticmethod - def echo(context, queue, value): - """Calls echo in the passed queue""" - LOG.debug(_("Nested received %(queue)s, %(value)s") - % locals()) - # TODO: so, it will replay the context and use the same REQID? - # that's bizarre. - ret = rpc.call(context, - queue, - {"method": "echo", - "args": {"value": value}}) - LOG.debug(_("Nested return %s"), ret) - return value - - nested = Nested() - conn = rpc.create_connection(True) - conn.create_consumer('nested', nested, False) - conn.consume_in_thread() - value = 42 - result = rpc.call(self.context, - 'nested', {"method": "echo", - "args": {"queue": "test", - "value": value}}) - conn.close() - self.assertEqual(value, result) - - -class TestReceiver(object): - """Simple Proxy class so the consumer has methods to call. - - Uses static methods because we aren't actually storing any state. - - """ - - @staticmethod - def echo(context, value): - """Simply returns whatever value is sent in.""" - LOG.debug(_("Received %s"), value) - return value - - @staticmethod - def context(context, value): - """Returns dictionary version of context.""" - LOG.debug(_("Received %s"), context) - return context.to_dict() - - @staticmethod - def echo_three_times(context, value): - context.reply(value) - context.reply(value + 1) - context.reply(value + 2) - - @staticmethod - def echo_three_times_yield(context, value): - yield value - yield value + 1 - yield value + 2 - - @staticmethod - def fail(context, value): - """Raises an exception with the value sent in.""" - raise Exception(value) diff --git a/nova/tests/test_rpc_common.py b/nova/tests/test_rpc_common.py new file mode 100644 index 000000000..b922be1df --- /dev/null +++ b/nova/tests/test_rpc_common.py @@ -0,0 +1,188 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +Unit Tests for remote procedure calls shared between all implementations +""" + +from nova import context +from nova import log as logging +from nova.rpc.common import RemoteError +from nova import test + + +LOG = logging.getLogger('nova.tests.rpc') + + +class _BaseRpcTestCase(test.TestCase): + def setUp(self): + super(_BaseRpcTestCase, self).setUp() + self.conn = self.rpc.create_connection(True) + self.receiver = TestReceiver() + self.conn.create_consumer('test', self.receiver, False) + self.conn.consume_in_thread() + self.context = context.get_admin_context() + + def tearDown(self): + self.conn.close() + super(_BaseRpcTestCase, self).tearDown() + + def test_call_succeed(self): + value = 42 + result = self.rpc.call(self.context, 'test', {"method": "echo", + "args": {"value": value}}) + self.assertEqual(value, result) + + def test_call_succeed_despite_multiple_returns(self): + value = 42 + result = self.rpc.call(self.context, 'test', {"method": "echo_three_times", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_call_succeed_despite_multiple_returns_yield(self): + value = 42 + result = self.rpc.call(self.context, 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_multicall_succeed_once(self): + value = 42 + result = self.rpc.multicall(self.context, + 'test', + {"method": "echo", + "args": {"value": value}}) + for i, x in enumerate(result): + if i > 0: + self.fail('should only receive one response') + self.assertEqual(value + i, x) + + def test_multicall_succeed_three_times(self): + value = 42 + result = self.rpc.multicall(self.context, + 'test', + {"method": "echo_three_times", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_multicall_succeed_three_times_yield(self): + value = 42 + result = self.rpc.multicall(self.context, + 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_context_passed(self): + """Makes sure a context is passed through rpc call.""" + value = 42 + result = self.rpc.call(self.context, + 'test', {"method": "context", + "args": {"value": value}}) + self.assertEqual(self.context.to_dict(), result) + + def test_call_exception(self): + """Test that exception gets passed back properly. + + rpc.call returns a RemoteError object. The value of the + exception is converted to a string, so we convert it back + to an int in the test. + + """ + value = 42 + self.assertRaises(RemoteError, + self.rpc.call, + self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + try: + self.rpc.call(self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + self.fail("should have thrown RemoteError") + except RemoteError as exc: + self.assertEqual(int(exc.value), value) + + def test_nested_calls(self): + """Test that we can do an rpc.call inside another call.""" + class Nested(object): + @staticmethod + def echo(context, queue, value): + """Calls echo in the passed queue""" + LOG.debug(_("Nested received %(queue)s, %(value)s") + % locals()) + # TODO: so, it will replay the context and use the same REQID? + # that's bizarre. 
+ ret = self.rpc.call(context, + queue, + {"method": "echo", + "args": {"value": value}}) + LOG.debug(_("Nested return %s"), ret) + return value + + nested = Nested() + conn = self.rpc.create_connection(True) + conn.create_consumer('nested', nested, False) + conn.consume_in_thread() + value = 42 + result = self.rpc.call(self.context, + 'nested', {"method": "echo", + "args": {"queue": "test", + "value": value}}) + conn.close() + self.assertEqual(value, result) + + +class TestReceiver(object): + """Simple Proxy class so the consumer has methods to call. + + Uses static methods because we aren't actually storing any state. + + """ + + @staticmethod + def echo(context, value): + """Simply returns whatever value is sent in.""" + LOG.debug(_("Received %s"), value) + return value + + @staticmethod + def context(context, value): + """Returns dictionary version of context.""" + LOG.debug(_("Received %s"), context) + return context.to_dict() + + @staticmethod + def echo_three_times(context, value): + context.reply(value) + context.reply(value + 1) + context.reply(value + 2) + + @staticmethod + def echo_three_times_yield(context, value): + yield value + yield value + 1 + yield value + 2 + + @staticmethod + def fail(context, value): + """Raises an exception with the value sent in.""" + raise Exception(value) diff --git a/nova/tests/test_rpc_kombu.py b/nova/tests/test_rpc_kombu.py index 7db88ecd0..101ed14af 100644 --- a/nova/tests/test_rpc_kombu.py +++ b/nova/tests/test_rpc_kombu.py @@ -16,37 +16,33 @@ # License for the specific language governing permissions and limitations # under the License. """ -Unit Tests for remote procedure calls using queue +Unit Tests for remote procedure calls using kombu """ from nova import context from nova import log as logging -from nova.rpc import impl_kombu as rpc from nova import test +from nova.rpc import impl_kombu +from nova.tests import test_rpc_common LOG = logging.getLogger('nova.tests.rpc') -class RpcKombuTestCase(test.TestCase): +class RpcKombuTestCase(test_rpc_common._BaseRpcTestCase): def setUp(self): + self.rpc = impl_kombu super(RpcKombuTestCase, self).setUp() - self.conn = rpc.create_connection() - self.receiver = TestReceiver() - self.conn.create_consumer('test', self.receiver, False) - self.conn.consume_in_thread() - self.context = context.get_admin_context() def tearDown(self): - self.conn.close() super(RpcKombuTestCase, self).tearDown() def test_reusing_connection(self): """Test that reusing a connection returns same one.""" - conn_context = rpc.create_connection(new=False) + conn_context = self.rpc.create_connection(new=False) conn1 = conn_context.connection conn_context.close() - conn_context = rpc.create_connection(new=False) + conn_context = self.rpc.create_connection(new=False) conn2 = conn_context.connection conn_context.close() self.assertEqual(conn1, conn2) @@ -54,7 +50,7 @@ class RpcKombuTestCase(test.TestCase): def test_topic_send_receive(self): """Test sending to a topic exchange/queue""" - conn = rpc.create_connection() + conn = self.rpc.create_connection() message = 'topic test message' self.received_message = None @@ -71,7 +67,7 @@ class RpcKombuTestCase(test.TestCase): def test_direct_send_receive(self): """Test sending to a direct exchange/queue""" - conn = rpc.create_connection() + conn = self.rpc.create_connection() message = 'direct test message' self.received_message = None @@ -91,8 +87,8 @@ class RpcKombuTestCase(test.TestCase): def test_fanout_send_receive(self): """Test sending to a fanout exchange and consuming from 2 
queues""" - conn = rpc.create_connection() - conn2 = rpc.create_connection() + conn = self.rpc.create_connection() + conn2 = self.rpc.create_connection() message = 'fanout test message' self.received_message = None @@ -112,149 +108,3 @@ class RpcKombuTestCase(test.TestCase): conn2.consume(limit=1) conn2.close() self.assertEqual(self.received_message, message) - - def test_call_succeed(self): - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo", - "args": {"value": value}}) - self.assertEqual(value, result) - - def test_call_succeed_despite_multiple_returns(self): - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo_three_times", - "args": {"value": value}}) - self.assertEqual(value + 2, result) - - def test_call_succeed_despite_multiple_returns_yield(self): - value = 42 - result = rpc.call(self.context, 'test', - {"method": "echo_three_times_yield", - "args": {"value": value}}) - self.assertEqual(value + 2, result) - - def test_multicall_succeed_once(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo", - "args": {"value": value}}) - for i, x in enumerate(result): - if i > 0: - self.fail('should only receive one response') - self.assertEqual(value + i, x) - - def test_multicall_succeed_three_times(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo_three_times", - "args": {"value": value}}) - for i, x in enumerate(result): - self.assertEqual(value + i, x) - - def test_multicall_succeed_three_times_yield(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo_three_times_yield", - "args": {"value": value}}) - for i, x in enumerate(result): - self.assertEqual(value + i, x) - - def test_context_passed(self): - """Makes sure a context is passed through rpc call.""" - value = 42 - result = rpc.call(self.context, - 'test', {"method": "context", - "args": {"value": value}}) - self.assertEqual(self.context.to_dict(), result) - - def test_call_exception(self): - """Test that exception gets passed back properly. - - rpc.call returns a RemoteError object. The value of the - exception is converted to a string, so we convert it back - to an int in the test. - - """ - value = 42 - self.assertRaises(rpc.RemoteError, - rpc.call, - self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - try: - rpc.call(self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - self.fail("should have thrown rpc.RemoteError") - except rpc.RemoteError as exc: - self.assertEqual(int(exc.value), value) - - def test_nested_calls(self): - """Test that we can do an rpc.call inside another call.""" - class Nested(object): - @staticmethod - def echo(context, queue, value): - """Calls echo in the passed queue""" - LOG.debug(_("Nested received %(queue)s, %(value)s") - % locals()) - # TODO: so, it will replay the context and use the same REQID? - # that's bizarre. - ret = rpc.call(context, - queue, - {"method": "echo", - "args": {"value": value}}) - LOG.debug(_("Nested return %s"), ret) - return value - - nested = Nested() - conn = rpc.create_connection(True) - conn.create_consumer('nested', nested, False) - conn.consume_in_thread() - value = 42 - result = rpc.call(self.context, - 'nested', {"method": "echo", - "args": {"queue": "test", - "value": value}}) - conn.close() - self.assertEqual(value, result) - - -class TestReceiver(object): - """Simple Proxy class so the consumer has methods to call. 
-
-    Uses static methods because we aren't actually storing any state.
-
-    """
-
-    @staticmethod
-    def echo(context, value):
-        """Simply returns whatever value is sent in."""
-        LOG.debug(_("Received %s"), value)
-        return value
-
-    @staticmethod
-    def context(context, value):
-        """Returns dictionary version of context."""
-        LOG.debug(_("Received %s"), context)
-        return context.to_dict()
-
-    @staticmethod
-    def echo_three_times(context, value):
-        context.reply(value)
-        context.reply(value + 1)
-        context.reply(value + 2)
-
-    @staticmethod
-    def echo_three_times_yield(context, value):
-        yield value
-        yield value + 1
-        yield value + 2
-
-    @staticmethod
-    def fail(context, value):
-        """Raises an exception with the value sent in."""
-        raise Exception(value)
--
cgit
From db27d93bc195598a5dd0e7a35480281447cf4ea1 Mon Sep 17 00:00:00 2001
From: Chris Behrens
Date: Mon, 29 Aug 2011 15:08:32 -0700
Subject: doc string cleanup

---
 nova/rpc/impl_kombu.py | 29 +++++++++++++++++++++++------
 1 file changed, 23 insertions(+), 6 deletions(-)

diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py
index 83ee1b122..ffd6447da 100644
--- a/nova/rpc/impl_kombu.py
+++ b/nova/rpc/impl_kombu.py
@@ -290,7 +290,7 @@ class FanoutPublisher(Publisher):
 
 
 class Connection(object):
-    """Connection instance object."""
+    """Connection object."""
 
     def __init__(self):
        self.consumers = []
@@ -503,7 +503,18 @@ ConnectionPool = Pool(
 
 
 class ConnectionContext(object):
+    """The class that is actually returned to the caller of
+    create_connection().  This is essentially a wrapper around
+    Connection that supports 'with' and can return a new Connection or
+    one from a pool.  It will also catch when an instance of this class
+    is to be deleted so that we can return Connections to the pool on
+    exceptions and so forth without making the caller be responsible for
+    catching all exceptions and making sure to return a connection to
+    the pool.
+    """
+
     def __init__(self, pooled=True):
+        """Create a new connection, or get one from the pool"""
         self.connection = None
         if pooled:
             self.connection = ConnectionPool.get()
@@ -512,9 +523,13 @@ class ConnectionContext(object):
         self.pooled = pooled
 
     def __enter__(self):
+        """with ConnectionContext() should return self"""
         return self
 
     def _done(self):
+        """If the connection came from a pool, clean it up and put it back.
+        If it did not come from a pool, close it.
+        """
         if self.connection:
             if self.pooled:
                 # Reset the connection so it's ready for the next caller
@@ -533,19 +548,19 @@ class ConnectionContext(object):
             self.connection = None
 
     def __exit__(self, t, v, tb):
-        """end if 'with' statement.  We're done here."""
+        """end of 'with' statement.  We're done here."""
         self._done()
 
     def __del__(self):
-        """Put Connection back into the pool if this ConnectionContext
-        is being deleted
-        """
+        """Caller is done with this connection.  Make sure we cleaned up."""
         self._done()
 
     def close(self):
+        """Caller is done with this connection."""
         self._done()
 
     def __getattr__(self, key):
+        """Proxy all other calls to the Connection instance"""
         if self.connection:
             return getattr(self.connection, key)
         else:
@@ -637,6 +652,7 @@ def _pack_context(msg, context):
 
 
 class RpcContext(context.RequestContext):
+    """Context that supports replying to a rpc.call"""
     def __init__(self, *args, **kwargs):
         msg_id = kwargs.pop('msg_id', None)
         self.msg_id = msg_id
@@ -656,7 +672,7 @@ class MulticallWaiter(object):
 
     def done(self):
         self._done = True
-        self._connection = None
+        self._connection.close()
 
     def __call__(self, data):
         """The consume() callback will call this.  Store the result."""
@@ -666,6 +682,7 @@ class MulticallWaiter(object):
             self._result = data['result']
 
     def __iter__(self):
+        """Return a result until we get a 'None' response from consumer"""
         if self._done:
             raise StopIteration
         while True:
--
cgit
From 468ed475207b023cfa3eada48338d34375f55be2 Mon Sep 17 00:00:00 2001
From: Chris Behrens
Date: Mon, 29 Aug 2011 15:15:58 -0700
Subject: fix ajax console proxy for new create_consumer method

---
 bin/nova-ajax-console-proxy | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy
index b3205ec56..23fb42fb5 100755
--- a/bin/nova-ajax-console-proxy
+++ b/bin/nova-ajax-console-proxy
@@ -114,8 +114,7 @@ class AjaxConsoleProxy(object):
                 {'args': kwargs, 'last_activity': time.time()}
 
         self.conn = rpc.create_connection(new=True)
-        rpc.create_consumer(
-            self.conn,
+        self.conn.create_consumer(
             FLAGS.ajax_console_proxy_topic,
             TopicProxy)
 
--
cgit
From 345afb31678a1f94fcca6d63a4ab506e537c3a9c Mon Sep 17 00:00:00 2001
From: Chris Behrens
Date: Mon, 29 Aug 2011 15:25:54 -0700
Subject: pep8 fix for test_rpc_common.py

---
 nova/tests/test_rpc_common.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/nova/tests/test_rpc_common.py b/nova/tests/test_rpc_common.py
index b922be1df..4ab4e8a0e 100644
--- a/nova/tests/test_rpc_common.py
+++ b/nova/tests/test_rpc_common.py
@@ -49,8 +49,9 @@ class _BaseRpcTestCase(test.TestCase):
 
     def test_call_succeed_despite_multiple_returns(self):
         value = 42
-        result = self.rpc.call(self.context, 'test', {"method": "echo_three_times",
-                                                 "args": {"value": value}})
+        result = self.rpc.call(self.context, 'test',
+                               {"method": "echo_three_times",
+                                "args": {"value": value}})
         self.assertEqual(value + 2, result)
 
     def test_call_succeed_despite_multiple_returns_yield(self):
--
cgit
From 8965e567ce25e6b9718f1bca60b35f586bab985f Mon Sep 17 00:00:00 2001
From: Chris Behrens
Date: Mon, 29 Aug 2011 15:26:26 -0700
Subject: remove unneeded connection= in carrot Consumer init

---
 nova/rpc/impl_carrot.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py
index 6d504aaec..1d23c1853 100644
--- a/nova/rpc/impl_carrot.py
+++ b/nova/rpc/impl_carrot.py
@@ -171,7 +171,6 @@ class Consumer(messaging.Consumer):
 
     """
     def __init__(self, *args, **kwargs):
-        connection = kwargs.get('connection')
         max_retries = FLAGS.rabbit_max_retries
         sleep_time = FLAGS.rabbit_retry_interval
         tries = 0
--
cgit
From ebd47b7cb397f33c1e7c9f32dd5b77f7fd5d6642 Mon Sep 17 00:00:00 2001
From: Dan Prince
Date: Mon, 29 Aug 2011 22:27:28 -0400
Subject: Update RequestContext so that it correctly sets self.is_admin from
 the roles array. Additionally add a bit of code to ignore case as well.
 Resolves issues when accessing admin APIs with Keystone.
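
For illustration, a standalone sketch of the corrected check. The helper
name is ours, not part of nova; the logic mirrors the one-line change to
nova/context.py below, and the assertions mirror the new tests.

    # Hypothetical helper, for illustration only; nova inlines this
    # logic in RequestContext.__init__.
    def is_admin_from_roles(roles):
        # Lower-case each role so a Keystone-style 'Admin' matches too.
        return 'admin' in [role.lower() for role in roles]

    assert is_admin_from_roles(['admin', 'weasel'])
    assert is_admin_from_roles(['Admin', 'weasel'])  # mixed case
    assert not is_admin_from_roles(['member'])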
---
 nova/context.py            |  2 +-
 nova/tests/test_context.py | 33 +++++++++++++++++++++++++++++++++
 2 files changed, 34 insertions(+), 1 deletion(-)
 create mode 100644 nova/tests/test_context.py

diff --git a/nova/context.py b/nova/context.py
index b917a1d81..5c22641a0 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -38,7 +38,7 @@ class RequestContext(object):
         self.roles = roles or []
         self.is_admin = is_admin
         if self.is_admin is None:
-            self.admin = 'admin' in self.roles
+            self.is_admin = 'admin' in [x.lower() for x in self.roles]
         self.read_deleted = read_deleted
         self.remote_address = remote_address
         if not timestamp:
diff --git a/nova/tests/test_context.py b/nova/tests/test_context.py
new file mode 100644
index 000000000..b2507fa59
--- /dev/null
+++ b/nova/tests/test_context.py
@@ -0,0 +1,33 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import context
+from nova import test
+
+
+class ContextTestCase(test.TestCase):
+
+    def test_request_context_sets_is_admin(self):
+        ctxt = context.RequestContext('111',
+                                      '222',
+                                      roles=['admin', 'weasel'])
+        self.assertEquals(ctxt.is_admin, True)
+
+    def test_request_context_sets_is_admin_upcase(self):
+        ctxt = context.RequestContext('111',
+                                      '222',
+                                      roles=['Admin', 'weasel'])
+        self.assertEquals(ctxt.is_admin, True)
--
cgit
From a635027ddbeb73dfad8bbf2890f67cb1ed7511bf Mon Sep 17 00:00:00 2001
From: Anthony Young
Date: Tue, 30 Aug 2011 00:03:39 -0700
Subject: disassociate floating ips before re-associating, and prevent
 re-association of already associated floating ips in manager

---
 nova/exception.py                                  |  4 ++
 nova/network/api.py                                |  6 ++
 nova/network/manager.py                            |  7 +++
 .../api/openstack/contrib/test_floating_ips.py     | 68 ++++++++++++++++++++--
 nova/tests/test_network.py                         | 16 +++++
 5 files changed, 96 insertions(+), 5 deletions(-)

diff --git a/nova/exception.py b/nova/exception.py
index 32981f4d5..b54981963 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -533,6 +533,10 @@ class NoMoreFloatingIps(FloatingIpNotFound):
     message = _("Zero floating ips available.")
 
 
+class FloatingIpAlreadyInUse(NovaException):
+    message = _("Floating ip %(address)s already in use by %(fixed_ip)s.")
+
+
 class NoFloatingIpsDefined(NotFound):
     message = _("Zero floating ips exist.")
 
diff --git a/nova/network/api.py b/nova/network/api.py
index d04474df3..78580d360 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -111,6 +111,12 @@ class API(base.Base):
                       '(%(project)s)') %
                     {'address': floating_ip['address'],
                      'project': context.project_id})
+
+        # If this address has previously been associated with a
+        # different instance, disassociate the floating_ip
+        if floating_ip['fixed_ip'] and floating_ip['fixed_ip'] != fixed_ip:
+            self.disassociate_floating_ip(context, floating_ip['address'])
+
         # NOTE(vish): if we are multi_host, send to the instances host
         if fixed_ip['network']['multi_host']:
             host = fixed_ip['instance']['host']
diff --git a/nova/network/manager.py
b/nova/network/manager.py index b4605eea5..e6b30d1a0 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -280,6 +280,13 @@ class FloatingIP(object): def associate_floating_ip(self, context, floating_address, fixed_address): """Associates an floating ip to a fixed ip.""" + floating_ip = self.db.floating_ip_get_by_address(context, + floating_address) + if floating_ip['fixed_ip']: + raise exception.FloatingIpAlreadyInUse( + address=floating_ip['address'], + fixed_ip=floating_ip['fixed_ip']['address']) + self.db.floating_ip_fixed_ip_associate(context, floating_address, fixed_address) diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index 568faf867..c14c29bf3 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -23,6 +23,7 @@ from nova import db from nova import test from nova import network from nova.tests.api.openstack import fakes +from nova.tests.api.openstack import test_servers from nova.api.openstack.contrib.floating_ips import FloatingIPController @@ -60,10 +61,38 @@ def compute_api_associate(self, context, instance_id, floating_ip): pass +def network_api_associate(self, context, floating_ip, fixed_ip): + pass + + def network_api_disassociate(self, context, floating_address): pass +def network_get_instance_nw_info(self, context, instance): + info = { + 'label': 'fake', + 'gateway': 'fake', + 'dhcp_server': 'fake', + 'broadcast': 'fake', + 'mac': 'fake', + 'vif_uuid': 'fake', + 'rxtx_cap': 'fake', + 'dns': [], + 'ips': [{'ip': '10.0.0.1'}], + 'should_create_bridge': False, + 'should_create_vlan': False} + + return [['ignore', info]] + + +def fake_instance_get(context, instance_id): + return { + "id": 1, + "user_id": 'fakeuser', + "project_id": '123'} + + class FloatingIpTest(test.TestCase): address = "10.10.10.10" @@ -79,9 +108,6 @@ class FloatingIpTest(test.TestCase): def setUp(self): super(FloatingIpTest, self).setUp() - self.controller = FloatingIPController() - fakes.stub_out_networking(self.stubs) - fakes.stub_out_rate_limiting(self.stubs) self.stubs.Set(network.api.API, "get_floating_ip", network_api_get_floating_ip) self.stubs.Set(network.api.API, "get_floating_ip_by_ip", @@ -92,10 +118,13 @@ class FloatingIpTest(test.TestCase): network_api_allocate) self.stubs.Set(network.api.API, "release_floating_ip", network_api_release) - self.stubs.Set(compute.api.API, "associate_floating_ip", - compute_api_associate) self.stubs.Set(network.api.API, "disassociate_floating_ip", network_api_disassociate) + self.stubs.Set(network.api.API, "get_instance_nw_info", + network_get_instance_nw_info) + self.stubs.Set(db.api, 'instance_get', + fake_instance_get) + self.context = context.get_admin_context() self._create_floating_ip() @@ -165,6 +194,8 @@ class FloatingIpTest(test.TestCase): self.assertEqual(res.status_int, 202) def test_add_floating_ip_to_instance(self): + self.stubs.Set(network.api.API, "associate_floating_ip", + network_api_associate) body = dict(addFloatingIp=dict(address='11.0.0.1')) req = webob.Request.blank('/v1.1/123/servers/test_inst/action') req.method = "POST" @@ -174,6 +205,33 @@ class FloatingIpTest(test.TestCase): resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 202) + def test_add_associated_floating_ip_to_instance(self): + def fake_fixed_ip_get_by_address(ctx, address, session=None): + return {'address': address, 'network': {'multi_host': None, + 'host': 'fake'}} + + 
self.disassociated = False
+
+        def fake_network_api_disassociate(local_self, ctx, floating_address):
+            self.disassociated = True
+
+        db.floating_ip_update(self.context, self.address, {'project_id': '123',
+                                                           'fixed_ip_id': 1})
+        self.stubs.Set(network.api.API, "disassociate_floating_ip",
+                       fake_network_api_disassociate)
+        self.stubs.Set(db.api, "fixed_ip_get_by_address",
+                       fake_fixed_ip_get_by_address)
+
+        body = dict(addFloatingIp=dict(address=self.address))
+        req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
+        req.method = "POST"
+        req.body = json.dumps(body)
+        req.headers["content-type"] = "application/json"
+
+        resp = req.get_response(fakes.wsgi_app())
+        self.assertEqual(resp.status_int, 202)
+        self.assertTrue(self.disassociated)
+
     def test_remove_floating_ip_from_instance(self):
         body = dict(removeFloatingIp=dict(address='11.0.0.1'))
         req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index 0b8539442..25ff940f0 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -371,6 +371,22 @@ class VlanNetworkTestCase(test.TestCase):
         self.mox.ReplayAll()
         self.network.validate_networks(self.context, requested_networks)
 
+    def test_cant_associate_associated_floating_ip(self):
+        ctxt = context.RequestContext('testuser', 'testproject',
+                                      is_admin=False)
+
+        def fake_floating_ip_get_by_address(context, address):
+            return {'address': '10.10.10.10',
+                    'fixed_ip': {'address': '10.0.0.1'}}
+        self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
+                       fake_floating_ip_get_by_address)
+
+        self.assertRaises(exception.FloatingIpAlreadyInUse,
+                          self.network.associate_floating_ip,
+                          ctxt,
+                          mox.IgnoreArg(),
+                          mox.IgnoreArg())
+
 
 class CommonNetworkTestCase(test.TestCase):
--
cgit
From fdbb12e1e4b0b2cc28344510afb1c57620240901 Mon Sep 17 00:00:00 2001
From: Brian Lamar
Date: Tue, 30 Aug 2011 10:42:51 -0400
Subject: Fix a bad merge on my part, this fixes rebuilds!
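
In outline, the bad merge had re-added lines in the rebuild path that pulled
image_ref out of kwargs and wrote it back onto the instance record before the
usage notification was built. A toy sketch of one way such an overwrite can
break rebuilds, assuming the caller no longer passes image_ref in kwargs
(made-up dicts and values, not nova code; the hunk below shows the real
removal):

    # Toy sketch: a kwargs-driven overwrite of a field the record
    # already owns. A dict stands in for the instance DB record.
    instance_ref = {'image_ref': 'ami-rebuilt'}   # set earlier in the flow
    kwargs = {}                                   # image_ref no longer passed

    # Bad-merge behaviour (removed below): clobbers the record with None.
    bad = dict(instance_ref, image_ref=kwargs.get('image_ref'))
    assert bad['image_ref'] is None

    # Post-fix behaviour: the record is left alone and used as-is.
    assert instance_ref['image_ref'] == 'ami-rebuilt'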
--- nova/compute/manager.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 167be66db..0477db745 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -547,9 +547,6 @@ class ComputeManager(manager.SchedulerDependentManager): vm_state=vm_states.REBUILDING, task_state=task_states.BLOCK_DEVICE_MAPPING) - image_ref = kwargs.get('image_ref') - instance_ref.image_ref = image_ref - instance_ref.injected_files = kwargs.get('injected_files', []) network_info = self.network_api.get_instance_nw_info(context, instance_ref) @@ -572,11 +569,9 @@ class ComputeManager(manager.SchedulerDependentManager): power_state=current_power_state, vm_state=vm_states.ACTIVE, task_state=None, - image_ref=image_ref, launched_at=utils.utcnow()) - usage_info = utils.usage_from_instance(instance_ref, - image_ref=image_ref) + usage_info = utils.usage_from_instance(instance_ref) notifier.notify('compute.%s' % self.host, 'compute.instance.rebuild', notifier.INFO, -- cgit From 1155b734164eb5856d68c926f7bf64a37ae4a3a4 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Tue, 30 Aug 2011 11:13:25 -0400 Subject: supporting changes-since --- nova/api/openstack/servers.py | 24 +++++++++++++----------- nova/db/sqlalchemy/api.py | 12 ++++++++---- nova/tests/api/openstack/test_servers.py | 24 ++++++++++++++++++++++++ 3 files changed, 45 insertions(+), 15 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 27c67e79e..e0e40679a 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -107,6 +107,14 @@ class Controller(object): LOG.error(reason) raise exception.InvalidInput(reason=reason) + if 'changes-since' in search_opts: + try: + parsed = utils.parse_isotime(search_opts['changes-since']) + except ValueError: + msg = _('Invalid changes-since value') + raise exc.HTTPBadRequest(explanation=msg) + search_opts['changes-since'] = parsed + # By default, compute's get_all() will return deleted instances. # If an admin hasn't specified a 'deleted' search option, we need # to filter out deleted instances by setting the filter ourselves. @@ -114,23 +122,17 @@ class Controller(object): # should return recently deleted images according to the API spec. if 'deleted' not in search_opts: - # Admin hasn't specified deleted filter if 'changes-since' not in search_opts: - # No 'changes-since', so we need to find non-deleted servers + # No 'changes-since', so we only want non-deleted servers search_opts['deleted'] = False - else: - # This is the default, but just in case.. - search_opts['deleted'] = True - - instance_list = self.compute_api.get_all( - context, search_opts=search_opts) - # FIXME(comstud): 'changes-since' is not fully implemented. Where - # should this be filtered? 
+ instance_list = self.compute_api.get_all(context, + search_opts=search_opts) limited_list = self._limit_items(instance_list, req) servers = [self._build_view(req, inst, is_detail)['server'] - for inst in limited_list] + for inst in limited_list] + return dict(servers=servers) @scheduler_api.redirect_handler diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 65b09a65d..1685e9928 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -35,6 +35,7 @@ from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import joinedload from sqlalchemy.orm import joinedload_all from sqlalchemy.sql import func +from sqlalchemy.sql.expression import desc from sqlalchemy.sql.expression import literal_column FLAGS = flags.FLAGS @@ -1250,12 +1251,17 @@ def instance_get_all_by_filters(context, filters): options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ - filter_by(deleted=can_read_deleted(context)) + order_by(desc(models.Instance.updated_at)) # Make a copy of the filters dictionary to use going forward, as we'll # be modifying it and we shouldn't affect the caller's use of it. filters = filters.copy() + if 'changes-since' in filters: + changes_since = filters['changes-since'] + query_prefix = query_prefix.\ + filter(models.Instance.updated_at > changes_since) + if not context.is_admin: # If we're not admin context, add appropriate filter.. if context.project_id: @@ -1277,9 +1283,7 @@ def instance_get_all_by_filters(context, filters): query_prefix = _exact_match_filter(query_prefix, filter_name, filters.pop(filter_name)) - instances = query_prefix.\ - filter_by(deleted=can_read_deleted(context)).\ - all() + instances = query_prefix.all() if not instances: return [] diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 3559e6de5..9036d6552 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -1257,6 +1257,30 @@ class ServersTest(test.TestCase): self.assertEqual(len(servers), 1) self.assertEqual(servers[0]['id'], 100) + def test_get_servers_allows_changes_since_v1_1(self): + def fake_get_all(compute_self, context, search_opts=None): + self.assertNotEqual(search_opts, None) + self.assertTrue('changes-since' in search_opts) + changes_since = datetime.datetime(2011, 1, 24, 17, 8, 1) + self.assertEqual(search_opts['changes-since'], changes_since) + return [stub_instance(100)] + + self.stubs.Set(nova.compute.API, 'get_all', fake_get_all) + + params = 'changes-since=2011-01-24T17:08:01Z' + req = webob.Request.blank('/v1.1/fake/servers?%s' % params) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + servers = json.loads(res.body)['servers'] + self.assertEqual(len(servers), 1) + self.assertEqual(servers[0]['id'], 100) + + def test_get_servers_allows_changes_since_bad_value_v1_1(self): + params = 'changes-since=asdf' + req = webob.Request.blank('/v1.1/fake/servers?%s' % params) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + def test_get_servers_unknown_or_admin_options1(self): """Test getting servers by admin-only or unknown options. This tests when admin_api is off. 
Make sure the admin and -- cgit From a127747db0ab3405a768e8f680a2eb94ae8ce314 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Tue, 30 Aug 2011 11:44:19 -0400 Subject: adding an assert --- nova/tests/api/openstack/test_servers.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 9036d6552..5f1ca466a 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -1263,6 +1263,7 @@ class ServersTest(test.TestCase): self.assertTrue('changes-since' in search_opts) changes_since = datetime.datetime(2011, 1, 24, 17, 8, 1) self.assertEqual(search_opts['changes-since'], changes_since) + self.assertTrue('deleted' not in search_opts) return [stub_instance(100)] self.stubs.Set(nova.compute.API, 'get_all', fake_get_all) -- cgit From 1c6d74a08dbb5b472e85e3d3a1fe2b3b8b9b89e3 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Tue, 30 Aug 2011 12:51:02 -0400 Subject: changing default sort to created_at --- nova/db/sqlalchemy/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 1685e9928..1e55d08e7 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1251,7 +1251,7 @@ def instance_get_all_by_filters(context, filters): options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ - order_by(desc(models.Instance.updated_at)) + order_by(desc(models.Instance.created_at)) # Make a copy of the filters dictionary to use going forward, as we'll # be modifying it and we shouldn't affect the caller's use of it. -- cgit From 980ae6aa2f3797e428beee6e383d8bd134175734 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Tue, 30 Aug 2011 14:48:02 -0400 Subject: yielding all the images --- nova/image/glance.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/image/glance.py b/nova/image/glance.py index 9060f6a91..f2f246ba6 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -153,7 +153,8 @@ class GlanceImageService(service.BaseImageService): except KeyError: raise exception.ImagePaginationFailed() - self._fetch_images(fetch_func, **kwargs) + for image in self._fetch_images(fetch_func, **kwargs): + yield image def show(self, context, image_id): """Returns a dict with image data for the given opaque image id.""" -- cgit From 09fd29a9cc29904679cc8921adaf7559c23f347f Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Tue, 30 Aug 2011 14:52:23 -0400 Subject: fixing short-ciruit condition --- nova/image/glance.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/image/glance.py b/nova/image/glance.py index f2f246ba6..b5f52351f 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -141,12 +141,13 @@ class GlanceImageService(service.BaseImageService): """Paginate through results from glance server""" images = fetch_func(**kwargs) - for image in images: - yield image - else: + if not images: # break out of recursive loop to end pagination return + for image in images: + yield image + try: # attempt to advance the marker in order to fetch next page kwargs['marker'] = images[-1]['id'] -- cgit -- cgit From dcf5970dd9bed27201c593d7d053970a632e5eee Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 30 Aug 2011 12:01:18 -0700 Subject: make two functions instead of fast flag and add compute api commands instead of hitting db directly --- bin/instance-usage-audit | 5 ++- 
nova/api/openstack/contrib/simple_tenant_usage.py | 14 ++++----- nova/compute/api.py | 17 +++++++--- nova/db/api.py | 16 +++++++--- nova/db/sqlalchemy/api.py | 36 +++++++++++++++------- .../openstack/contrib/test_simple_tenant_usage.py | 10 +++--- 6 files changed, 62 insertions(+), 36 deletions(-) diff --git a/bin/instance-usage-audit b/bin/instance-usage-audit index a06c6b1b3..7ce5732e7 100755 --- a/bin/instance-usage-audit +++ b/bin/instance-usage-audit @@ -102,9 +102,8 @@ if __name__ == '__main__': logging.setup() begin, end = time_period(FLAGS.instance_usage_audit_period) print "Creating usages for %s until %s" % (str(begin), str(end)) - instances = db.instance_get_active_by_window(context.get_admin_context(), - begin, - end) + ctxt = context.get_admin_context() + instances = db.instance_get_active_by_window_joined(ctxt, begin, end) print "%s instances" % len(instances) for instance_ref in instances: usage_info = utils.usage_from_instance(instance_ref, diff --git a/nova/api/openstack/contrib/simple_tenant_usage.py b/nova/api/openstack/contrib/simple_tenant_usage.py index 16e712815..363ac1451 100644 --- a/nova/api/openstack/contrib/simple_tenant_usage.py +++ b/nova/api/openstack/contrib/simple_tenant_usage.py @@ -19,10 +19,9 @@ import urlparse import webob from datetime import datetime -from nova import db from nova import exception from nova import flags -from nova.compute import instance_types +from nova.compute import api from nova.api.openstack import extensions from nova.api.openstack import views from nova.db.sqlalchemy.session import get_session @@ -71,11 +70,11 @@ class SimpleTenantUsageController(object): def _tenant_usages_for_period(self, context, period_start, period_stop, tenant_id=None, detailed=True): - instances = db.instance_get_active_by_window(context, + compute_api = api.API() + instances = compute_api.get_active_by_window(context, period_start, period_stop, - tenant_id, - fast=True) + tenant_id) from nova import log as logging logging.info(instances) rval = {} @@ -90,8 +89,9 @@ class SimpleTenantUsageController(object): if not flavors.get(flavor_type): try: - flavors[flavor_type] = db.instance_type_get(context, - flavor_type) + it_ref = compute_api.get_instance_type(context, + flavor_type) + flavors[flavor_type] = it_ref except exception.InstanceTypeNotFound: # can't bill if there is no instance type continue diff --git a/nova/compute/api.py b/nova/compute/api.py index 3b4bde8ea..53bab53a4 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -19,13 +19,11 @@ """Handles all requests relating to instances (guest vms).""" -import eventlet import novaclient import re import time from nova import block_device -from nova import db from nova import exception from nova import flags import nova.image @@ -237,7 +235,7 @@ class API(base.Base): self.ensure_default_security_group(context) if key_data is None and key_name: - key_pair = db.key_pair_get(context, context.user_id, key_name) + key_pair = self.db.key_pair_get(context, context.user_id, key_name) key_data = key_pair['public_key'] if reservation_id is None: @@ -802,6 +800,15 @@ class API(base.Base): "args": {"topic": FLAGS.compute_topic, "instance_id": instance_id}}) + def get_active_by_window(self, context, begin, end=None, project_id=None): + """Get instances that were continuously active over a window.""" + return self.db.instance_get_active_by_window(context, begin, end, + project_id) + + def get_instance_type(self, context, instance_type_id): + """Get an instance type by instance type id.""" + return 
self.db.instance_type_get(context, instance_type_id) + def get(self, context, instance_id): """Get a single instance with the given instance_id.""" # NOTE(sirp): id used to be exclusively integer IDs; now we're @@ -1001,7 +1008,7 @@ class API(base.Base): :param extra_properties: dict of extra image properties to include """ - instance = db.api.instance_get(context, instance_id) + instance = self.db.api.instance_get(context, instance_id) properties = {'instance_uuid': instance['uuid'], 'user_id': str(context.user_id), 'image_state': 'creating', @@ -1026,7 +1033,7 @@ class API(base.Base): def rebuild(self, context, instance_id, image_href, admin_password, name=None, metadata=None, files_to_inject=None): """Rebuild the given instance with the provided metadata.""" - instance = db.api.instance_get(context, instance_id) + instance = self.db.instance_get(context, instance_id) if instance["state"] == power_state.BUILDING: msg = _("Instance already building") diff --git a/nova/db/api.py b/nova/db/api.py index 3233985b6..148887635 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -496,14 +496,20 @@ def instance_get_all_by_filters(context, filters): return IMPL.instance_get_all_by_filters(context, filters) -def instance_get_active_by_window(context, begin, end=None, - project_id=None, fast=False): +def instance_get_active_by_window(context, begin, end=None, project_id=None): """Get instances active during a certain time window. - Setting fast to true will stop all joinedloads. Specifying a project_id will filter for a certain project.""" - return IMPL.instance_get_active_by_window(context, begin, end, - project_id, fast) + return IMPL.instance_get_active_by_window(context, begin, end, project_id) + + +def instance_get_active_by_window_joined(context, begin, end=None, + project_id=None): + """Get instances and joins active during a certain time window. 
+ + Specifying a project_id will filter for a certain project.""" + return IMPL.instance_get_active_by_window_joined(context, begin, end, + project_id) def instance_get_all_by_user(context, user_id): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index c4cc199eb..d76dc22ed 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1306,19 +1306,33 @@ def instance_get_all_by_filters(context, filters): return instances -@require_admin_context -def instance_get_active_by_window(context, begin, end=None, - project_id=None, fast=False): - """Return instances that were continuously active over the given window""" +@require_context +def instance_get_active_by_window(context, begin, end=None, project_id=None): + """Return instances that were continuously active over window.""" session = get_session() - query = session.query(models.Instance) - if not fast: - query = query.options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ips.network')).\ - options(joinedload('instance_type')) + query = session.query(models.Instance).\ + filter(models.Instance.launched_at < begin) + if end: + query = query.filter(or_(models.Instance.terminated_at == None, + models.Instance.terminated_at > end)) + else: + query = query.filter(models.Instance.terminated_at == None) + if project_id: + query = query.filter_by(project_id=project_id) + return query.all() + - query = query.filter(models.Instance.launched_at < begin) +@require_admin_context +def instance_get_active_by_window_joined(context, begin, end=None, + project_id=None): + """Return instances and joins that were continuously active over window.""" + session = get_session() + query = session.query(models.Instance).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ips.network')).\ + options(joinedload('instance_type')).\ + filter(models.Instance.launched_at < begin) if end: query = query.filter(or_(models.Instance.terminated_at == None, models.Instance.terminated_at > end)) diff --git a/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py index 2bd619820..de0a6d779 100644 --- a/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py +++ b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py @@ -20,9 +20,9 @@ import json import webob from nova import context -from nova import db from nova import flags from nova import test +from nova.compute import api from nova.tests.api.openstack import fakes @@ -39,7 +39,7 @@ STOP = datetime.datetime.utcnow() START = STOP - datetime.timedelta(hours=HOURS) -def fake_instance_type_get(context, instance_type_id): +def fake_instance_type_get(self, context, instance_type_id): return {'id': 1, 'vcpus': VCPUS, 'local_gb': LOCAL_GB, @@ -59,7 +59,7 @@ def get_fake_db_instance(start, end, instance_id, tenant_id): 'launched_at': start, 'terminated_at': end} -def fake_instance_get_active_by_window(context, begin, end, project_id, fast): +def fake_instance_get_active_by_window(self, context, begin, end, project_id): return [get_fake_db_instance(START, STOP, x, @@ -70,9 +70,9 @@ def fake_instance_get_active_by_window(context, begin, end, project_id, fast): class SimpleTenantUsageTest(test.TestCase): def setUp(self): super(SimpleTenantUsageTest, self).setUp() - self.stubs.Set(db, "instance_type_get", + self.stubs.Set(api.API, "get_instance_type", fake_instance_type_get) - 
self.stubs.Set(db, "instance_get_active_by_window", + self.stubs.Set(api.API, "get_active_by_window", fake_instance_get_active_by_window) self.admin_context = context.RequestContext('fakeadmin_0', 'faketenant_0', -- cgit From 85e182e72d8f15678234701f6b254bf6c8e17f3a Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 30 Aug 2011 12:08:53 -0700 Subject: fix a bunch of direct usages of db in compute api --- nova/compute/api.py | 52 +++++++++++++++++++++++++++------------------------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 53bab53a4..f1099576e 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -387,9 +387,9 @@ class API(base.Base): security_groups = [] for security_group_name in security_group: - group = db.security_group_get_by_name(context, - context.project_id, - security_group_name) + group = self.db.security_group_get_by_name(context, + context.project_id, + security_group_name) security_groups.append(group['id']) for security_group_id in security_groups: @@ -549,8 +549,9 @@ class API(base.Base): def has_finished_migration(self, context, instance_uuid): """Returns true if an instance has a finished migration.""" try: - db.migration_get_by_instance_and_status(context, instance_uuid, - 'finished') + self.db.migration_get_by_instance_and_status(context, + instance_uuid, + 'finished') return True except exception.NotFound: return False @@ -564,14 +565,15 @@ class API(base.Base): :param context: the security context """ try: - db.security_group_get_by_name(context, context.project_id, - 'default') + self.db.security_group_get_by_name(context, + context.project_id, + 'default') except exception.NotFound: values = {'name': 'default', 'description': 'default', 'user_id': context.user_id, 'project_id': context.project_id} - db.security_group_create(context, values) + self.db.security_group_create(context, values) def trigger_security_group_rules_refresh(self, context, security_group_id): """Called when a rule is added to or removed from a security_group.""" @@ -636,7 +638,7 @@ class API(base.Base): """Called when a rule is added to or removed from a security_group""" hosts = [x['host'] for (x, idx) - in db.service_get_all_compute_sorted(context)] + in self.db.service_get_all_compute_sorted(context)] for host in hosts: rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), @@ -664,11 +666,11 @@ class API(base.Base): def add_security_group(self, context, instance_id, security_group_name): """Add security group to the instance""" - security_group = db.security_group_get_by_name(context, - context.project_id, - security_group_name) + security_group = self.db.security_group_get_by_name(context, + context.project_id, + security_group_name) # check if the server exists - inst = db.instance_get(context, instance_id) + inst = self.db.instance_get(context, instance_id) #check if the security group is associated with the server if self._is_security_group_associated_with_server(security_group, instance_id): @@ -680,21 +682,21 @@ class API(base.Base): if inst['state'] != power_state.RUNNING: raise exception.InstanceNotRunning(instance_id=instance_id) - db.instance_add_security_group(context.elevated(), - instance_id, - security_group['id']) + self.db.instance_add_security_group(context.elevated(), + instance_id, + security_group['id']) rpc.cast(context, - db.queue_get_for(context, FLAGS.compute_topic, inst['host']), + self.db.queue_get_for(context, FLAGS.compute_topic, inst['host']), {"method": 
"refresh_security_group_rules", "args": {"security_group_id": security_group['id']}}) def remove_security_group(self, context, instance_id, security_group_name): """Remove the security group associated with the instance""" - security_group = db.security_group_get_by_name(context, - context.project_id, - security_group_name) + security_group = self.db.security_group_get_by_name(context, + context.project_id, + security_group_name) # check if the server exists - inst = db.instance_get(context, instance_id) + inst = self.db.instance_get(context, instance_id) #check if the security group is associated with the server if not self._is_security_group_associated_with_server(security_group, instance_id): @@ -706,11 +708,11 @@ class API(base.Base): if inst['state'] != power_state.RUNNING: raise exception.InstanceNotRunning(instance_id=instance_id) - db.instance_remove_security_group(context.elevated(), - instance_id, - security_group['id']) + self.db.instance_remove_security_group(context.elevated(), + instance_id, + security_group['id']) rpc.cast(context, - db.queue_get_for(context, FLAGS.compute_topic, inst['host']), + self.db.queue_get_for(context, FLAGS.compute_topic, inst['host']), {"method": "refresh_security_group_rules", "args": {"security_group_id": security_group['id']}}) -- cgit From 2fcc6da8ba528c5169f7394d57f90ccd2754a23c Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 30 Aug 2011 12:14:25 -0700 Subject: pep8, fix fakes --- nova/api/openstack/contrib/simple_tenant_usage.py | 1 + nova/compute/api.py | 12 ++++++------ nova/tests/api/openstack/contrib/test_createserverext.py | 2 ++ nova/tests/api/openstack/contrib/test_simple_tenant_usage.py | 2 +- nova/tests/api/openstack/test_servers.py | 1 + 5 files changed, 11 insertions(+), 7 deletions(-) diff --git a/nova/api/openstack/contrib/simple_tenant_usage.py b/nova/api/openstack/contrib/simple_tenant_usage.py index 363ac1451..e81aef66e 100644 --- a/nova/api/openstack/contrib/simple_tenant_usage.py +++ b/nova/api/openstack/contrib/simple_tenant_usage.py @@ -30,6 +30,7 @@ from webob import exc FLAGS = flags.FLAGS + class SimpleTenantUsageController(object): def _hours_for(self, instance, period_start, period_stop): launched_at = instance['launched_at'] diff --git a/nova/compute/api.py b/nova/compute/api.py index f1099576e..0074028e2 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -388,8 +388,8 @@ class API(base.Base): security_groups = [] for security_group_name in security_group: group = self.db.security_group_get_by_name(context, - context.project_id, - security_group_name) + context.project_id, + security_group_name) security_groups.append(group['id']) for security_group_id in security_groups: @@ -667,8 +667,8 @@ class API(base.Base): def add_security_group(self, context, instance_id, security_group_name): """Add security group to the instance""" security_group = self.db.security_group_get_by_name(context, - context.project_id, - security_group_name) + context.project_id, + security_group_name) # check if the server exists inst = self.db.instance_get(context, instance_id) #check if the security group is associated with the server @@ -693,8 +693,8 @@ class API(base.Base): def remove_security_group(self, context, instance_id, security_group_name): """Remove the security group associated with the instance""" security_group = self.db.security_group_get_by_name(context, - context.project_id, - security_group_name) + context.project_id, + security_group_name) # check if the server exists inst = 
self.db.instance_get(context, instance_id) #check if the security group is associated with the server diff --git a/nova/tests/api/openstack/contrib/test_createserverext.py b/nova/tests/api/openstack/contrib/test_createserverext.py index e5eed14fe..f6d9ba784 100644 --- a/nova/tests/api/openstack/contrib/test_createserverext.py +++ b/nova/tests/api/openstack/contrib/test_createserverext.py @@ -23,6 +23,7 @@ from xml.dom import minidom import stubout import webob +from nova import db from nova import exception from nova import flags from nova import test @@ -76,6 +77,7 @@ class CreateserverextTest(test.TestCase): def __init__(self): self.injected_files = None self.networks = None + self.db = db def create(self, *args, **kwargs): if 'injected_files' in kwargs: diff --git a/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py index de0a6d779..2430b9d51 100644 --- a/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py +++ b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py @@ -26,7 +26,6 @@ from nova.compute import api from nova.tests.api.openstack import fakes - FLAGS = flags.FLAGS SERVERS = 5 @@ -59,6 +58,7 @@ def get_fake_db_instance(start, end, instance_id, tenant_id): 'launched_at': start, 'terminated_at': end} + def fake_instance_get_active_by_window(self, context, begin, end, project_id): return [get_fake_db_instance(START, STOP, diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 3559e6de5..d065f48b6 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -3229,6 +3229,7 @@ class TestServerInstanceCreation(test.TestCase): def __init__(self): self.injected_files = None self.networks = None + self.db = db def create(self, *args, **kwargs): if 'injected_files' in kwargs: -- cgit From 5cf27b5a338f7821f82c91df5889159b56fa0bb6 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 30 Aug 2011 12:41:30 -0700 Subject: fix remaining tests --- nova/api/openstack/contrib/simple_tenant_usage.py | 2 +- nova/compute/api.py | 2 +- .../api/openstack/contrib/test_security_groups.py | 72 +++++++++++----------- nova/tests/api/openstack/test_extensions.py | 1 + nova/tests/api/openstack/test_server_actions.py | 2 +- 5 files changed, 40 insertions(+), 39 deletions(-) diff --git a/nova/api/openstack/contrib/simple_tenant_usage.py b/nova/api/openstack/contrib/simple_tenant_usage.py index e81aef66e..69b38e229 100644 --- a/nova/api/openstack/contrib/simple_tenant_usage.py +++ b/nova/api/openstack/contrib/simple_tenant_usage.py @@ -212,7 +212,7 @@ class SimpleTenantUsageController(object): class Simple_tenant_usage(extensions.ExtensionDescriptor): def get_name(self): - return "Simple_tenant_usage" + return "SimpleTenantUsage" def get_alias(self): return "os-simple-tenant-usage" diff --git a/nova/compute/api.py b/nova/compute/api.py index 0074028e2..205207d66 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1010,7 +1010,7 @@ class API(base.Base): :param extra_properties: dict of extra image properties to include """ - instance = self.db.api.instance_get(context, instance_id) + instance = self.db.instance_get(context, instance_id) properties = {'instance_uuid': instance['uuid'], 'user_id': str(context.user_id), 'image_state': 'creating', diff --git a/nova/tests/api/openstack/contrib/test_security_groups.py b/nova/tests/api/openstack/contrib/test_security_groups.py index bc1536911..0816a6312 100644 --- 
a/nova/tests/api/openstack/contrib/test_security_groups.py +++ b/nova/tests/api/openstack/contrib/test_security_groups.py @@ -360,7 +360,7 @@ class TestSecurityGroups(test.TestCase): def test_associate_by_invalid_server_id(self): body = dict(addSecurityGroup=dict(name='test')) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) req = webob.Request.blank('/v1.1/123/servers/invalid/action') req.headers['Content-Type'] = 'application/json' @@ -372,7 +372,7 @@ class TestSecurityGroups(test.TestCase): def test_associate_without_body(self): req = webob.Request.blank('/v1.1/123/servers/1/action') body = dict(addSecurityGroup=None) - self.stubs.Set(nova.db, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'instance_get', return_server) req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = json.dumps(body) @@ -382,7 +382,7 @@ class TestSecurityGroups(test.TestCase): def test_associate_no_security_group_name(self): req = webob.Request.blank('/v1.1/123/servers/1/action') body = dict(addSecurityGroup=dict()) - self.stubs.Set(nova.db, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'instance_get', return_server) req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = json.dumps(body) @@ -392,7 +392,7 @@ class TestSecurityGroups(test.TestCase): def test_associate_security_group_name_with_whitespaces(self): req = webob.Request.blank('/v1.1/123/servers/1/action') body = dict(addSecurityGroup=dict(name=" ")) - self.stubs.Set(nova.db, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'instance_get', return_server) req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = json.dumps(body) @@ -400,9 +400,9 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 400) def test_associate_non_existing_instance(self): - self.stubs.Set(nova.db, 'instance_get', return_server_nonexistant) + self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant) body = dict(addSecurityGroup=dict(name="test")) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) req = webob.Request.blank('/v1.1/123/servers/10000/action') req.headers['Content-Type'] = 'application/json' @@ -412,8 +412,8 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 404) def test_associate_non_running_instance(self): - self.stubs.Set(nova.db, 'instance_get', return_non_running_server) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'instance_get', return_non_running_server) + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group_without_instances) body = dict(addSecurityGroup=dict(name="test")) req = webob.Request.blank('/v1.1/123/servers/1/action') @@ -424,8 +424,8 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 400) def test_associate_already_associated_security_group_to_instance(self): - self.stubs.Set(nova.db, 'instance_get', return_server) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) body = dict(addSecurityGroup=dict(name="test")) req = webob.Request.blank('/v1.1/123/servers/1/action') @@ -436,12 +436,12 @@ class TestSecurityGroups(test.TestCase): 
self.assertEquals(response.status_int, 400) def test_associate(self): - self.stubs.Set(nova.db, 'instance_get', return_server) - self.mox.StubOutWithMock(nova.db, 'instance_add_security_group') - nova.db.instance_add_security_group(mox.IgnoreArg(), + self.stubs.Set(nova.db.api, 'instance_get', return_server) + self.mox.StubOutWithMock(nova.db.api, 'instance_add_security_group') + nova.db.api.instance_add_security_group(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group_without_instances) self.mox.ReplayAll() @@ -454,12 +454,12 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 202) def test_associate_xml(self): - self.stubs.Set(nova.db, 'instance_get', return_server) - self.mox.StubOutWithMock(nova.db, 'instance_add_security_group') - nova.db.instance_add_security_group(mox.IgnoreArg(), + self.stubs.Set(nova.db.api, 'instance_get', return_server) + self.mox.StubOutWithMock(nova.db.api, 'instance_add_security_group') + nova.db.api.instance_add_security_group(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group_without_instances) self.mox.ReplayAll() @@ -483,7 +483,7 @@ class TestSecurityGroups(test.TestCase): def test_disassociate_by_invalid_server_id(self): body = dict(removeSecurityGroup=dict(name='test')) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) req = webob.Request.blank('/v1.1/123/servers/invalid/action') req.headers['Content-Type'] = 'application/json' @@ -495,7 +495,7 @@ class TestSecurityGroups(test.TestCase): def test_disassociate_without_body(self): req = webob.Request.blank('/v1.1/123/servers/1/action') body = dict(removeSecurityGroup=None) - self.stubs.Set(nova.db, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'instance_get', return_server) req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = json.dumps(body) @@ -505,7 +505,7 @@ class TestSecurityGroups(test.TestCase): def test_disassociate_no_security_group_name(self): req = webob.Request.blank('/v1.1/123/servers/1/action') body = dict(removeSecurityGroup=dict()) - self.stubs.Set(nova.db, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'instance_get', return_server) req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = json.dumps(body) @@ -515,7 +515,7 @@ class TestSecurityGroups(test.TestCase): def test_disassociate_security_group_name_with_whitespaces(self): req = webob.Request.blank('/v1.1/123/servers/1/action') body = dict(removeSecurityGroup=dict(name=" ")) - self.stubs.Set(nova.db, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'instance_get', return_server) req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = json.dumps(body) @@ -523,9 +523,9 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 400) def test_disassociate_non_existing_instance(self): - self.stubs.Set(nova.db, 'instance_get', return_server_nonexistant) + self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant) body = dict(removeSecurityGroup=dict(name="test")) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) 
req = webob.Request.blank('/v1.1/123/servers/10000/action') req.headers['Content-Type'] = 'application/json' @@ -535,8 +535,8 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 404) def test_disassociate_non_running_instance(self): - self.stubs.Set(nova.db, 'instance_get', return_non_running_server) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'instance_get', return_non_running_server) + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) body = dict(removeSecurityGroup=dict(name="test")) req = webob.Request.blank('/v1.1/123/servers/1/action') @@ -547,8 +547,8 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 400) def test_disassociate_already_associated_security_group_to_instance(self): - self.stubs.Set(nova.db, 'instance_get', return_server) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group_without_instances) body = dict(removeSecurityGroup=dict(name="test")) req = webob.Request.blank('/v1.1/123/servers/1/action') @@ -559,12 +559,12 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 400) def test_disassociate(self): - self.stubs.Set(nova.db, 'instance_get', return_server) - self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group') - nova.db.instance_remove_security_group(mox.IgnoreArg(), + self.stubs.Set(nova.db.api, 'instance_get', return_server) + self.mox.StubOutWithMock(nova.db.api, 'instance_remove_security_group') + nova.db.api.instance_remove_security_group(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) self.mox.ReplayAll() @@ -577,12 +577,12 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 202) def test_disassociate_xml(self): - self.stubs.Set(nova.db, 'instance_get', return_server) - self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group') - nova.db.instance_remove_security_group(mox.IgnoreArg(), + self.stubs.Set(nova.db.api, 'instance_get', return_server) + self.mox.StubOutWithMock(nova.db.api, 'instance_remove_security_group') + nova.db.api.instance_remove_security_group(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) self.mox.ReplayAll() diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 05267d8fb..31443242b 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -95,6 +95,7 @@ class ExtensionControllerTest(test.TestCase): "Quotas", "Rescue", "SecurityGroups", + "SimpleTenantUsage", "VSAs", "VirtualInterfaces", "Volumes", diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 3dfdeb79c..c9c33abbd 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -248,7 +248,7 @@ class ServerActionsTest(test.TestCase): def fake_migration_get(*args): return {} - self.stubs.Set(nova.db, 'migration_get_by_instance_and_status', + self.stubs.Set(nova.db.api, 'migration_get_by_instance_and_status', fake_migration_get) 
res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) -- cgit From 4f65e0153c22886b118bdb92402b91d9b209632c Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 30 Aug 2011 13:53:01 -0700 Subject: restore old way FLAGS.rpc_backend worked.. no short name support for consistency --- nova/rpc/__init__.py | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index fe50fb476..4f57a345d 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -23,18 +23,10 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('rpc_backend', - 'kombu', + 'nova.rpc.impl_kombu', "The messaging module to use, defaults to kombu.") -impl_table = {'kombu': 'nova.rpc.impl_kombu', - 'amqp': 'nova.rpc.impl_kombu', - 'carrot': 'nova.rpc.impl_carrot'} - - -# rpc_backend can be a short name like 'kombu', or it can be the full -# module name -RPCIMPL = import_object(impl_table.get(FLAGS.rpc_backend, - FLAGS.rpc_backend)) +RPCIMPL = import_object(FLAGS.rpc_backend) def create_connection(new=True): -- cgit From b6c306b1a207fd2c5ee2e53d841fd8e60c2fd8e1 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 30 Aug 2011 13:55:06 -0700 Subject: add kombu to pip-requires and contrib/nova.sh --- contrib/nova.sh | 2 +- tools/pip-requires | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/contrib/nova.sh b/contrib/nova.sh index 7994e5133..16cddebd5 100755 --- a/contrib/nova.sh +++ b/contrib/nova.sh @@ -81,7 +81,7 @@ if [ "$CMD" == "install" ]; then sudo apt-get install -y python-netaddr python-pastedeploy python-eventlet sudo apt-get install -y python-novaclient python-glance python-cheetah sudo apt-get install -y python-carrot python-tempita python-sqlalchemy - sudo apt-get install -y python-suds + sudo apt-get install -y python-suds python-kombu if [ "$USE_IPV6" == 1 ]; then diff --git a/tools/pip-requires b/tools/pip-requires index 60b502ffd..66d6a48d9 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -8,6 +8,7 @@ anyjson==0.2.4 boto==1.9b carrot==0.10.5 eventlet +kombu lockfile==0.8 lxml==2.3 python-novaclient==2.6.0 -- cgit From e326acf46748904704dd97f511927559dc2480f2 Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Tue, 30 Aug 2011 15:05:39 -0700 Subject: Fix for LP Bug #837534 --- nova/api/openstack/create_instance_helper.py | 13 ++++++ .../api/openstack/contrib/test_createserverext.py | 46 +++++++++++++++++++++- 2 files changed, 58 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 483ff4985..019283fdf 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -138,7 +138,10 @@ class CreateInstanceHelper(object): raise exc.HTTPBadRequest(explanation=msg) zone_blob = server_dict.get('blob') + user_data = server_dict.get('user_data') + self._validate_user_data(user_data) + availability_zone = server_dict.get('availability_zone') name = server_dict['name'] self._validate_server_name(name) @@ -370,6 +373,16 @@ class CreateInstanceHelper(object): return networks + def _validate_user_data(self, user_data): + """Check if the user_data is encoded properly""" + if not user_data: + return + try: + user_data = base64.b64decode(user_data) + except TypeError: + expl = _('Userdata content cannot be decoded') + raise exc.HTTPBadRequest(explanation=expl) + class ServerXMLDeserializer(wsgi.XMLDeserializer): """ diff --git 
a/nova/tests/api/openstack/contrib/test_createserverext.py b/nova/tests/api/openstack/contrib/test_createserverext.py index e5eed14fe..65c7c1682 100644 --- a/nova/tests/api/openstack/contrib/test_createserverext.py +++ b/nova/tests/api/openstack/contrib/test_createserverext.py @@ -37,7 +37,8 @@ import nova.image.fake import nova.rpc from nova.tests.api.openstack import fakes - +from nova import log as logging +LOG = logging.getLogger('nova.api.openstack.test') FLAGS = flags.FLAGS FLAGS.verbose = True @@ -76,6 +77,7 @@ class CreateserverextTest(test.TestCase): def __init__(self): self.injected_files = None self.networks = None + self.user_data = None def create(self, *args, **kwargs): if 'injected_files' in kwargs: @@ -87,6 +89,10 @@ class CreateserverextTest(test.TestCase): self.networks = kwargs['requested_networks'] else: self.networks = None + + if 'user_data' in kwargs: + self.user_data = kwargs['user_data'] + return [{'id': '1234', 'display_name': 'fakeinstance', 'uuid': FAKE_UUID, 'created_at': "", @@ -119,6 +125,14 @@ class CreateserverextTest(test.TestCase): server['networks'] = network_list return {'server': server} + def _create_user_data_request_dict(self, user_data): + server = {} + server['name'] = 'new-server-test' + server['imageRef'] = 1 + server['flavorRef'] = 1 + server['user_data'] = user_data + return {'server': server} + def _get_create_request_json(self, body_dict): req = webob.Request.blank('/v1.1/123/os-create-server-ext') req.headers['Content-Type'] = 'application/json' @@ -178,6 +192,13 @@ class CreateserverextTest(test.TestCase): self._run_create_instance_with_mock_compute_api(request) return request, response, compute_api.networks + def _create_instance_with_user_data_json(self, networks): + body_dict = self._create_user_data_request_dict(networks) + request = self._get_create_request_json(body_dict) + compute_api, response = \ + self._run_create_instance_with_mock_compute_api(request) + return request, response, compute_api.user_data + def _create_instance_with_networks_xml(self, networks): body_dict = self._create_networks_request_dict(networks) request = self._get_create_request_xml(body_dict) @@ -188,6 +209,7 @@ class CreateserverextTest(test.TestCase): def test_create_instance_with_no_networks(self): request, response, networks = \ self._create_instance_with_networks_json(networks=None) + LOG.debug(response) self.assertEquals(response.status_int, 202) self.assertEquals(networks, None) @@ -304,3 +326,25 @@ class CreateserverextTest(test.TestCase): self.assertEquals(response.status_int, 202) self.assertEquals(compute_api.networks, [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)]) + + def test_create_instance_with_userdata(self): + user_data_contents = '#!/bin/bash\necho "Oh no!"\n' + user_data_contents = base64.b64encode(user_data_contents) + request, response, user_data = \ + self._create_instance_with_user_data_json(user_data_contents) + self.assertEquals(response.status_int, 202) + self.assertEquals(user_data, user_data_contents) + + def test_create_instance_with_userdata_none(self): + user_data_contents = None + request, response, user_data = \ + self._create_instance_with_user_data_json(user_data_contents) + self.assertEquals(response.status_int, 202) + self.assertEquals(user_data, user_data_contents) + + def test_create_instance_with_userdata_with_non_b64_content(self): + user_data_contents = '#!/bin/bash\necho "Oh no!"\n' + request, response, user_data = \ + self._create_instance_with_user_data_json(user_data_contents) + 
self.assertEquals(response.status_int, 400) + self.assertEquals(user_data, None) -- cgit From 476101d81cf81e6035b44e2257c1bcd8e958043a Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Tue, 30 Aug 2011 15:09:08 -0700 Subject: Removed debug messages --- nova/tests/api/openstack/contrib/test_createserverext.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/nova/tests/api/openstack/contrib/test_createserverext.py b/nova/tests/api/openstack/contrib/test_createserverext.py index 65c7c1682..d8a5c9e55 100644 --- a/nova/tests/api/openstack/contrib/test_createserverext.py +++ b/nova/tests/api/openstack/contrib/test_createserverext.py @@ -37,8 +37,7 @@ import nova.image.fake import nova.rpc from nova.tests.api.openstack import fakes -from nova import log as logging -LOG = logging.getLogger('nova.api.openstack.test') + FLAGS = flags.FLAGS FLAGS.verbose = True @@ -209,7 +208,6 @@ class CreateserverextTest(test.TestCase): def test_create_instance_with_no_networks(self): request, response, networks = \ self._create_instance_with_networks_json(networks=None) - LOG.debug(response) self.assertEquals(response.status_int, 202) self.assertEquals(networks, None) -- cgit From 4ec4ddd2e6465f0483ecf50d430458169ad4c348 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 30 Aug 2011 15:12:43 -0700 Subject: make default carrot again and delay the import in rpc/__init__.py --- nova/rpc/__init__.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index 4f57a345d..32509fff6 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -23,27 +23,33 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('rpc_backend', - 'nova.rpc.impl_kombu', - "The messaging module to use, defaults to kombu.") + 'nova.rpc.impl_carrot', + "The messaging module to use, defaults to carrot.") -RPCIMPL = import_object(FLAGS.rpc_backend) +_RPCIMPL = None + +def get_impl(): + global _RPCIMPL + if _RPCIMPL is None: + _RPCIMPL = import_object(FLAGS.rpc_backend) + return _RPCIMPL def create_connection(new=True): - return RPCIMPL.create_connection(new=new) + return get_impl().create_connection(new=new) def call(context, topic, msg): - return RPCIMPL.call(context, topic, msg) + return get_impl().call(context, topic, msg) def cast(context, topic, msg): - return RPCIMPL.cast(context, topic, msg) + return get_impl().cast(context, topic, msg) def fanout_cast(context, topic, msg): - return RPCIMPL.fanout_cast(context, topic, msg) + return get_impl().fanout_cast(context, topic, msg) def multicall(context, topic, msg): - return RPCIMPL.multicall(context, topic, msg) + return get_impl().multicall(context, topic, msg) -- cgit From 5ccbce699880557f9c58d4d403487979d3604ccf Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 30 Aug 2011 15:30:16 -0700 Subject: pep8 fix --- nova/rpc/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index 32509fff6..e29cd80e1 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -28,7 +28,9 @@ flags.DEFINE_string('rpc_backend', _RPCIMPL = None + def get_impl(): + """Delay import of rpc_backend until FLAGS are loaded.""" global _RPCIMPL if _RPCIMPL is None: _RPCIMPL = import_object(FLAGS.rpc_backend) -- cgit From 2e12e975ee9d4ab7a17eebb0e36714b56d6b1779 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 30 Aug 2011 15:52:25 -0700 Subject: logging change when rpc pool creates new connection --- nova/rpc/impl_carrot.py | 2 +- 
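The get_impl() indirection introduced two commits up is the heart of this group: binding RPCIMPL at module import time froze FLAGS.rpc_backend before configuration had been parsed, while memoizing the import defers the choice to the first RPC call. The same pattern outside nova's flag machinery, as a sketch with a hypothetical module path:

    import importlib

    _backend = None

    def get_backend(name):
        """Import the configured backend on first use and cache it."""
        global _backend
        if _backend is None:
            _backend = importlib.import_module(name)
        return _backend

    # Nothing is imported until the first call:
    rpc = get_backend('myapp.rpc.impl_kombu')   # hypothetical path
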
nova/rpc/impl_kombu.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index 1d23c1853..303a4ff88 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -152,7 +152,7 @@ class Pool(pools.Pool): # TODO(comstud): Timeout connections not used in a while def create(self): - LOG.debug('Creating new connection') + LOG.debug('Pool creating new connection') return Connection.instance(new=True) # Create a ConnectionPool to use for RPC calls. We'll order the diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index ffd6447da..8242bd177 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -491,7 +491,7 @@ class Pool(pools.Pool): # TODO(comstud): Timeout connections not used in a while def create(self): - LOG.debug('Creating new connection') + LOG.debug('Pool creating new connection') return Connection() # Create a ConnectionPool to use for RPC calls. We'll order the -- cgit From 66aa9a6306cde5db2039daaf11a8422619560a33 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 30 Aug 2011 20:45:51 -0700 Subject: fix FloatingIpAlreadyInUse to use correct string pattern, convert ApiErrors to 400 responses --- nova/api/openstack/contrib/floating_ips.py | 6 +++- nova/exception.py | 2 +- .../api/openstack/contrib/test_floating_ips.py | 32 ++++++++++++++++++++++ 3 files changed, 38 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 40086f778..ad3094d52 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -138,7 +138,11 @@ class Floating_ips(extensions.ExtensionDescriptor): msg = _("Address not specified") raise webob.exc.HTTPBadRequest(explanation=msg) - self.compute_api.associate_floating_ip(context, instance_id, address) + try: + self.compute_api.associate_floating_ip(context, instance_id, + address) + except exception.ApiError, e: + raise webob.exc.HTTPBadRequest(explanation=e.message) return webob.Response(status_int=202) diff --git a/nova/exception.py b/nova/exception.py index b54981963..caa65146d 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -534,7 +534,7 @@ class NoMoreFloatingIps(FloatingIpNotFound): class FloatingIpAlreadyInUse(NovaException): - message = _("Floating ip %(address) already in use by %(fixed_ip).") + message = _("Floating ip %(address)s already in use by %(fixed_ip)s.") class NoFloatingIpsDefined(NotFound): diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index c14c29bf3..08148278d 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -205,6 +205,38 @@ class FloatingIpTest(test.TestCase): resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 202) + def test_associate_floating_ip_to_instance_wrong_project_id(self): + def fake_fixed_ip_get_by_address(ctx, address, session=None): + return {'address': address, 'network': {'multi_host': None, + 'host': 'fake'}} + self.stubs.Set(db.api, "fixed_ip_get_by_address", + fake_fixed_ip_get_by_address) + db.floating_ip_update(self.context, self.address, {'project_id': 'bad', + 'fixed_ip_id': 1}) + body = dict(addFloatingIp=dict(address=self.address)) + req = webob.Request.blank('/v1.1/123/servers/test_inst/action') + req.method = "POST" + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + resp 
= req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 400) + + def test_associate_floating_ip_to_instance_no_project_id(self): + def fake_fixed_ip_get_by_address(ctx, address, session=None): + return {'address': address, 'network': {'multi_host': None, + 'host': 'fake'}} + self.stubs.Set(db.api, "fixed_ip_get_by_address", + fake_fixed_ip_get_by_address) + db.floating_ip_update(self.context, self.address, {'project_id': None, + 'fixed_ip_id': 1}) + body = dict(addFloatingIp=dict(address=self.address)) + req = webob.Request.blank('/v1.1/123/servers/test_inst/action') + req.method = "POST" + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 400) + def test_add_associated_floating_ip_to_instance(self): def fake_fixed_ip_get_by_address(ctx, address, session=None): return {'address': address, 'network': {'multi_host': None, -- cgit From b7c98734f8829fb4b213869bdfca6481fbeab98e Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 30 Aug 2011 20:53:27 -0700 Subject: use kombu.connection.BrokerConnection vs kombu.connection.Connection so that older versions of kombu (1.0.4) work as well as newer. --- nova/rpc/impl_kombu.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index 8242bd177..ab70e7cfb 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -322,7 +322,7 @@ class Connection(object): except self.connection.connection_errors: pass time.sleep(1) - self.connection = kombu.connection.Connection(**self.params) + self.connection = kombu.connection.BrokerConnection(**self.params) if FLAGS.fake_rabbit: # Kludge to speed up tests. self.connection.transport.polling_interval = 0.0 -- cgit From e43ffb5137ba256a21b3241b549d7c66cb7e5e04 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 30 Aug 2011 21:05:43 -0700 Subject: switched default to kombu per vishy --- nova/rpc/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index e29cd80e1..c0cfdd5ce 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -23,8 +23,8 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('rpc_backend', - 'nova.rpc.impl_carrot', - "The messaging module to use, defaults to carrot.") + 'nova.rpc.impl_kombu', + "The messaging module to use, defaults to kombu.") _RPCIMPL = None -- cgit From 1477b8c33374db1166c6c67ff68e03c94f3436a5 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 30 Aug 2011 21:16:26 -0700 Subject: add test to verify 400 response when out of addresses --- nova/api/openstack/contrib/floating_ips.py | 2 +- nova/tests/api/openstack/contrib/test_floating_ips.py | 18 +++++++++++++++--- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index ad3094d52..99c0d1469 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -96,7 +96,7 @@ class FloatingIPController(object): except rpc.RemoteError as ex: # NOTE(tr3buchet) - why does this block exist? 
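Context for the block being patched here: rpc.RemoteError reaches the API tier with the remote exception flattened into strings (exc_type, value, traceback), so the handler can only dispatch on the exception's name. Once the follow-up "add explicit message" commit later in this series lands, the block settles into roughly this shape, condensed:

    try:
        address = self.network_api.allocate_floating_ip(context)
    except rpc.RemoteError as ex:
        if ex.exc_type == 'NoMoreFloatingIps':
            # Translate the remote condition into a client error
            # instead of letting it surface as a 500.
            msg = _("No more floating ips available.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        raise
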
if ex.exc_type == 'NoMoreFloatingIps': - raise exception.NoMoreFloatingIps() + raise webob.exc.HTTPBadRequest(explanation=ex.message) else: raise diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index 08148278d..fc10f2f6c 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -20,8 +20,9 @@ import webob from nova import compute from nova import context from nova import db -from nova import test from nova import network +from nova import rpc +from nova import test from nova.tests.api.openstack import fakes from nova.tests.api.openstack import test_servers @@ -114,8 +115,6 @@ class FloatingIpTest(test.TestCase): network_api_get_floating_ip) self.stubs.Set(network.api.API, "list_floating_ips", network_api_list_floating_ips) - self.stubs.Set(network.api.API, "allocate_floating_ip", - network_api_allocate) self.stubs.Set(network.api.API, "release_floating_ip", network_api_release) self.stubs.Set(network.api.API, "disassociate_floating_ip", @@ -172,7 +171,20 @@ class FloatingIpTest(test.TestCase): self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10') self.assertEqual(res_dict['floating_ip']['instance_id'], None) + def test_floating_ip_allocate_no_free_ips(self): + def fake_call(*args, **kwargs): + raise(rpc.RemoteError('NoMoreFloatingIps', '', '')) + + self.stubs.Set(rpc, "call", fake_call) + req = webob.Request.blank('/v1.1/123/os-floating-ips') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + def test_floating_ip_allocate(self): + self.stubs.Set(network.api.API, "allocate_floating_ip", + network_api_allocate) req = webob.Request.blank('/v1.1/123/os-floating-ips') req.method = 'POST' req.headers['Content-Type'] = 'application/json' -- cgit From c9a6681f484f38778987fbbaa352d07bd8f747c3 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 31 Aug 2011 10:14:07 -0400 Subject: Removed extraneous import and s/vm_state.STOP/vm_states.STOPPED/ --- nova/db/sqlalchemy/api.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 24e1772f6..631e53ceb 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -28,6 +28,7 @@ from nova import flags from nova import ipv6 from nova import utils from nova import log as logging +from nova.compute import vm_states from nova.db.sqlalchemy import models from nova.db.sqlalchemy.session import get_session from sqlalchemy import or_ @@ -1102,11 +1103,10 @@ def instance_destroy(context, instance_id): def instance_stop(context, instance_id): session = get_session() with session.begin(): - from nova.compute import power_state session.query(models.Instance).\ filter_by(id=instance_id).\ update({'host': None, - 'vm_state': vm_state.STOP, + 'vm_state': vm_states.STOPPED, 'task_state': None, 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ -- cgit From ba8163fed57bcd1948be4cfb021fb32391702cc5 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 31 Aug 2011 11:54:19 -0700 Subject: kludge for kombu 1.1.3 memory transport bug --- nova/rpc/impl_kombu.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index ab70e7cfb..b994a6a10 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ 
-303,6 +303,7 @@ class Connection(object): self.interval_stepping = FLAGS.rabbit_retry_backoff # max retry-interval = 30 seconds self.interval_max = 30 + self.memory_transport = False self.params = dict(hostname=FLAGS.rabbit_host, port=FLAGS.rabbit_port, @@ -311,6 +312,9 @@ class Connection(object): virtual_host=FLAGS.rabbit_virtual_host) if FLAGS.fake_rabbit: self.params['transport'] = 'memory' + self.memory_transport = True + else: + self.memory_transport = False self.connection = None self.reconnect() @@ -323,7 +327,7 @@ class Connection(object): pass time.sleep(1) self.connection = kombu.connection.BrokerConnection(**self.params) - if FLAGS.fake_rabbit: + if self.memory_transport: # Kludge to speed up tests. self.connection.transport.polling_interval = 0.0 self.consumer_num = itertools.count(1) @@ -345,6 +349,9 @@ class Connection(object): LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d' % self.params)) self.channel = self.connection.channel() + # work around 'memory' transport bug in 1.1.3 + if self.memory_transport: + self.channel._new_queue('ae.undeliver') for consumer in self.consumers: consumer.reconnect(self.channel) if self.consumers: @@ -374,6 +381,9 @@ class Connection(object): self.cancel_consumer_thread() self.channel.close() self.channel = self.connection.channel() + # work around 'memory' transport bug in 1.1.3 + if self.memory_transport: + self.channel._new_queue('ae.undeliver') self.consumers = [] def declare_consumer(self, consumer_cls, topic, callback): -- cgit From abd6b240b5247a2981e86c1d3161306fb2b4c4c5 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 31 Aug 2011 12:02:14 -0700 Subject: moved key_name per review --- nova/api/openstack/views/servers.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 35821f9a7..cd01c9373 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -79,8 +79,6 @@ class ViewBuilder(object): metadata[item['key']] = str(item['value']) inst_dict['metadata'] = metadata - inst_dict['key_name'] = inst.get('key_name', '') - inst_dict['hostId'] = '' if inst.get('host'): inst_dict['hostId'] = hashlib.sha224(inst['host']).hexdigest() @@ -190,6 +188,7 @@ class ViewBuilderV11(ViewBuilder): def _build_extra(self, response, inst): self._build_links(response, inst) response['uuid'] = inst['uuid'] + response['key_name'] = inst.get('key_name', '') self._build_config_drive(response, inst) def _build_links(self, response, inst): -- cgit From f687e978a41c78e10e0c371c5486298925b5857f Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 31 Aug 2011 12:44:15 -0700 Subject: add explicit message for NoMoreFloatingIps exception --- nova/api/openstack/contrib/floating_ips.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 99c0d1469..6ce531c8f 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -96,7 +96,8 @@ class FloatingIPController(object): except rpc.RemoteError as ex: # NOTE(tr3buchet) - why does this block exist? 
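Returning to the memory-transport kludge at the top of this group: both workarounds apply only under FLAGS.fake_rabbit, where tests run against kombu's in-process transport. Isolated, the test-only setup looks roughly like this; note that channel._new_queue is a private kombu API, used here exactly as the patch uses it:

    import kombu.connection

    conn = kombu.connection.BrokerConnection(transport='memory')
    conn.transport.polling_interval = 0.0   # don't sleep between polls in tests
    channel = conn.channel()
    # kombu 1.1.3's memory transport fails to declare the dead-letter
    # queue it later tries to use; create it up front.
    channel._new_queue('ae.undeliver')
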
if ex.exc_type == 'NoMoreFloatingIps': - raise webob.exc.HTTPBadRequest(explanation=ex.message) + msg = _("No more floating ips available.") + raise webob.exc.HTTPBadRequest(explanation=msg) else: raise -- cgit From c9758dd4832c167562baefad5dcc88f2a1a19b73 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 31 Aug 2011 12:51:16 -0700 Subject: fix keypairs stubs --- nova/api/openstack/create_instance_helper.py | 1 + nova/tests/api/openstack/fakes.py | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index d82cb534f..744353f31 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -189,6 +189,7 @@ class CreateInstanceHelper(object): raise exc.HTTPBadRequest(explanation=msg) except exception.KeypairNotFound as error: msg = _("Invalid key_name provided.") + raise exc.HTTPBadRequest(explanation=msg) except exception.SecurityGroupNotFound as error: raise exc.HTTPBadRequest(explanation=unicode(error)) except RemoteError as err: diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index a095dd90a..a4a90e6da 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -107,11 +107,18 @@ def stub_out_key_pair_funcs(stubs, have_key_pair=True): def key_pair(context, user_id): return [dict(name='key', public_key='public_key')] + def one_key_pair(context, user_id, name): + if name == 'key': + return dict(name='key', public_key='public_key') + else: + raise exc.KeypairNotFound(user_id=user_id, name=name) + def no_key_pair(context, user_id): return [] if have_key_pair: stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair) + stubs.Set(nova.db, 'key_pair_get', one_key_pair) else: stubs.Set(nova.db, 'key_pair_get_all_by_user', no_key_pair) -- cgit From a87a0bba9c7b046b36ee80bc033df5499cca35e1 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Wed, 31 Aug 2011 17:05:01 -0400 Subject: adding support for limiting in image service; updating tests with fixture ids and marker support --- nova/image/glance.py | 9 +++++++++ nova/tests/image/test_glance.py | 29 ++++++++++++++++++++++------- 2 files changed, 31 insertions(+), 7 deletions(-) diff --git a/nova/image/glance.py b/nova/image/glance.py index b5f52351f..7233eb18d 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -154,6 +154,15 @@ class GlanceImageService(service.BaseImageService): except KeyError: raise exception.ImagePaginationFailed() + try: + kwargs['limit'] = kwargs['limit'] - len(images) + # break if we have reached a provided limit + if kwargs['limit'] <= 0: + return + except KeyError: + # ignore missing limit, just proceed without it + pass + for image in self._fetch_images(fetch_func, **kwargs): yield image diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 0ff508ffa..5df25df37 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -38,7 +38,16 @@ class StubGlanceClient(object): return self.images[image_id] def get_images_detailed(self, filters=None, marker=None, limit=None): - return self.images.itervalues() + images = self.images.values() + if marker is None: + index = 0 + else: + for index, image in enumerate(images): + if image['id'] == marker: + index += 1 + break + # default to a page size of 3 to ensure we flex the pagination code + return images[index:index + 3] def get_image(self, image_id): return self.images[image_id], [] @@ -86,23 +95,23 
@@ class TestGlanceImageServiceProperties(BaseGlanceTest): """Ensure attributes which aren't BASE_IMAGE_ATTRS are stored in the properties dict """ - fixtures = {'image1': {'name': 'image1', 'is_public': True, + fixtures = {'image1': {'id': '1', 'name': 'image1', 'is_public': True, 'foo': 'bar', 'properties': {'prop1': 'propvalue1'}}} self.client.images = fixtures image_meta = self.service.show(self.context, 'image1') - expected = {'name': 'image1', 'is_public': True, + expected = {'id': '1', 'name': 'image1', 'is_public': True, 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}} self.assertEqual(image_meta, expected) def test_detail_passes_through_to_client(self): - fixtures = {'image1': {'name': 'image1', 'is_public': True, + fixtures = {'image1': {'id': '1', 'name': 'image1', 'is_public': True, 'foo': 'bar', 'properties': {'prop1': 'propvalue1'}}} self.client.images = fixtures image_meta = self.service.detail(self.context) - expected = [{'name': 'image1', 'is_public': True, + expected = [{'id': '1', 'name': 'image1', 'is_public': True, 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}}] self.assertEqual(image_meta, expected) @@ -166,6 +175,7 @@ class TestGetterDateTimeNoneTests(BaseGlanceTest): def _make_datetime_fixtures(self): fixtures = { 'image1': { + 'id': '1', 'name': 'image1', 'is_public': True, 'created_at': self.NOW_GLANCE_FORMAT, @@ -173,6 +183,7 @@ class TestGetterDateTimeNoneTests(BaseGlanceTest): 'deleted_at': self.NOW_GLANCE_FORMAT, }, 'image2': { + 'id': '2', 'name': 'image2', 'is_public': True, 'created_at': self.NOW_GLANCE_OLD_FORMAT, @@ -183,13 +194,17 @@ class TestGetterDateTimeNoneTests(BaseGlanceTest): return fixtures def _make_none_datetime_fixtures(self): - fixtures = {'image1': {'name': 'image1', 'is_public': True, + fixtures = {'image1': {'id': '1', + 'name': 'image1', + 'is_public': True, 'updated_at': None, 'deleted_at': None}} return fixtures def _make_blank_datetime_fixtures(self): - fixtures = {'image1': {'name': 'image1', 'is_public': True, + fixtures = {'image1': {'id': '1', + 'name': 'image1', + 'is_public': True, 'updated_at': '', 'deleted_at': ''}} return fixtures -- cgit From 9de8a589b4ee0e007267efe2394b504382e4cdc1 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 31 Aug 2011 14:58:55 -0700 Subject: change to use _get_key_name to retrieve the key --- nova/api/openstack/servers.py | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index f288f2228..53684fa52 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -144,8 +144,15 @@ class Controller(object): except exception.NotFound: raise exc.HTTPNotFound() + def _get_key_name(self, req, body): + """ Get default keypair if not set """ + raise NotImplementedError() + def create(self, req, body): """ Creates a new server for a given user """ + if 'server' in body: + body['server']['key_name'] = self._get_key_name(req, body) + extra_values = None extra_values, instances = self.helper.create_instance( req, body, self.compute_api.create) @@ -564,17 +571,12 @@ class ControllerV10(Controller): raise exc.HTTPNotFound() return webob.Response(status_int=202) - def create(self, req, body): - """ Creates a new server for a given user """ - # note(ja): v1.0 injects the first keypair for the project for testing - if 'server' in body and not 'key_name' in body['server']: - context = req.environ["nova.context"] - keypairs = db.key_pair_get_all_by_user(context.elevated(), - 
context.user_id) - if keypairs: - body['server']['key_name'] = keypairs[0]['name'] - - return super(ControllerV10, self).create(req, body) + def _get_key_name(self, req, body): + context = req.environ["nova.context"] + keypairs = db.key_pair_get_all_by_user(context, + context.user_id) + if keypairs: + return keypairs[0]['name'] def _image_ref_from_req_data(self, data): return data['server']['imageId'] @@ -647,6 +649,10 @@ class ControllerV11(Controller): except exception.NotFound: raise exc.HTTPNotFound() + def _get_key_name(self, req, body): + if 'server' in body: + return body['server'].get('key_name') + def _image_ref_from_req_data(self, data): try: return data['server']['imageRef'] -- cgit From 642c9ceb1bae9fa5ba008cb69c47f449ea173c3a Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 31 Aug 2011 15:24:29 -0700 Subject: expect key_name attribute in 1.1 --- nova/tests/api/openstack/test_servers.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index c54bead49..78c872a28 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -3630,6 +3630,7 @@ class ServersViewBuilderV11Test(test.TestCase): "id": 1, "uuid": self.instance['uuid'], "name": "test_server", + "key_name": '', "links": [ { "rel": "self", @@ -3653,6 +3654,7 @@ class ServersViewBuilderV11Test(test.TestCase): "id": 1, "uuid": self.instance['uuid'], "name": "test_server", + "key_name": '', "config_drive": None, "links": [ { -- cgit From bd917feb287a3d0e8f2f9f9c60b716c7f599f4ff Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 31 Aug 2011 16:13:55 -0700 Subject: remove extra test --- nova/tests/api/openstack/test_server_actions.py | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 0c5fab41e..b9ef41465 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -244,19 +244,6 @@ class ServerActionsTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 500) - def test_resized_server_has_correct_status(self): - req = self.webreq('/1', 'GET') - - def fake_migration_get(*args): - return {} - - self.stubs.Set(nova.db.api, 'migration_get_by_instance_and_status', - fake_migration_get) - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - body = json.loads(res.body) - self.assertEqual(body['server']['status'], 'RESIZE-CONFIRM') - def test_confirm_resize_server(self): req = self.webreq('/1/action', 'POST', dict(confirmResize=None)) -- cgit From e1cd8f036f34fc622416e74a302959c9e50a798c Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Wed, 31 Aug 2011 17:06:15 -0700 Subject: Fix for LP Bug #838251 --- nova/api/ec2/cloud.py | 12 ++---------- nova/api/openstack/contrib/createserverext.py | 24 +++++++++++++++++++----- nova/api/openstack/servers.py | 18 ++++++++++++++++++ nova/utils.py | 9 +++++++++ 4 files changed, 48 insertions(+), 15 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 9aebf92e3..d3b3a21ef 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -995,14 +995,6 @@ class CloudController(object): 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} - @staticmethod - def _convert_to_set(lst, label): - if lst is None or lst == []: - return None - if not isinstance(lst, 
list): - lst = [lst] - return [{label: x} for x in lst] - def _format_kernel_id(self, instance_ref, result, key): kernel_id = instance_ref['kernel_id'] if kernel_id is None: @@ -1160,7 +1152,7 @@ class CloudController(object): if instance.get('security_groups'): for security_group in instance['security_groups']: security_group_names.append(security_group['name']) - result['groupSet'] = CloudController._convert_to_set( + result['groupSet'] = utils.convert_to_set( security_group_names, 'groupId') def _format_instances(self, context, instance_id=None, use_v6=False, @@ -1224,7 +1216,7 @@ class CloudController(object): i['keyName'] = '%s (%s, %s)' % (i['keyName'], instance['project_id'], instance['host']) - i['productCodesSet'] = self._convert_to_set([], 'product_codes') + i['productCodesSet'] = utils.convert_to_set([], 'product_codes') self._format_instance_type(instance, i) i['launchTime'] = instance['created_at'] i['amiLaunchIndex'] = instance['launch_index'] diff --git a/nova/api/openstack/contrib/createserverext.py b/nova/api/openstack/contrib/createserverext.py index ba72fdb0b..4cc093891 100644 --- a/nova/api/openstack/contrib/createserverext.py +++ b/nova/api/openstack/contrib/createserverext.py @@ -14,18 +14,32 @@ # License for the specific language governing permissions and limitations # under the License +from nova import utils from nova.api.openstack import create_instance_helper as helper from nova.api.openstack import extensions from nova.api.openstack import servers from nova.api.openstack import wsgi -class Createserverext(extensions.ExtensionDescriptor): - """The servers create ext +class CreateServerController(servers.ControllerV11): + def _build_view(self, req, instance, is_detail=False): + server = super(CreateServerController, self)._build_view(req, + instance, + is_detail) + if is_detail: + self._build_security_groups(server['server'], instance) + return server + + def _build_security_groups(self, response, inst): + sg_names = [] + if inst.get('security_groups'): + sg_names = [security_group['name'] for security_group in \ + inst['security_groups']] + response['security_groups'] = utils.convert_to_set(sg_names, 'name') - Exposes addFixedIp and removeFixedIp actions on servers. 
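On the helper being hoisted into nova/utils.py in this commit: despite its name, convert_to_set returns a list of single-key dicts (or None for empty input), the shape both the EC2 groupSet serializer and the new security_groups view expect. Its contract, per the implementation:

    >>> convert_to_set(['default', 'web'], 'name')
    [{'name': 'default'}, {'name': 'web'}]
    >>> convert_to_set('default', 'name')       # a bare scalar is wrapped
    [{'name': 'default'}]
    >>> convert_to_set([], 'name') is None      # empty and None collapse to None
    True
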
- """ +class Createserverext(extensions.ExtensionDescriptor): + """The servers create ext""" def get_name(self): return "Createserverext" @@ -58,7 +72,7 @@ class Createserverext(extensions.ExtensionDescriptor): deserializer = wsgi.RequestDeserializer(body_deserializers) res = extensions.ResourceExtension('os-create-server-ext', - controller=servers.ControllerV11(), + controller=CreateServerController(), deserializer=deserializer, serializer=serializer) resources.append(res) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 27c67e79e..e2cb46165 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -912,6 +912,11 @@ class ServerXMLSerializer(wsgi.XMLDictSerializer): server['addresses']) server_node.appendChild(addresses_node) + if 'security_groups' in server: + security_groups_node = self._create_security_groups_node(xml_doc, + server['security_groups']) + server_node.appendChild(security_groups_node) + return server_node def _server_list_to_xml(self, xml_doc, servers, detailed): @@ -964,6 +969,19 @@ class ServerXMLSerializer(wsgi.XMLDictSerializer): server_dict['server']) return self.to_xml_string(node, True) + def _security_group_to_xml(self, doc, security_group): + node = doc.createElement('security_group') + node.setAttribute('name', str(security_group.get('name'))) + return node + + def _create_security_groups_node(self, xml_doc, security_groups): + security_groups_node = xml_doc.createElement('security_groups') + if security_groups: + for security_group in security_groups: + node = self._security_group_to_xml(xml_doc, security_group) + security_groups_node.appendChild(node) + return security_groups_node + def create_resource(version='1.0'): controller = { diff --git a/nova/utils.py b/nova/utils.py index 21e6221b2..4855f6fd6 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -901,3 +901,12 @@ def monkey_patch(): func = import_class("%s.%s" % (module, key)) setattr(sys.modules[module], key,\ decorator("%s.%s" % (module, key), func)) + + +def convert_to_set(lst, label): + """Convert a value or list into a set""" + if lst is None or lst == []: + return None + if not isinstance(lst, list): + lst = [lst] + return [{label: x} for x in lst] -- cgit From 20beec509aff1bb3a30e9f1d95d3e2825f2b38ea Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Wed, 31 Aug 2011 17:30:11 -0700 Subject: Fix for LP Bug #838466 --- nova/compute/api.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index e045ef3de..6806522f7 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -383,10 +383,6 @@ class API(base.Base): If you are changing this method, be sure to update both call paths. 
""" - instance = dict(launch_index=num, **base_options) - instance = self.db.instance_create(context, instance) - instance_id = instance['id'] - elevated = context.elevated() if security_group is None: security_group = ['default'] @@ -400,6 +396,10 @@ class API(base.Base): security_group_name) security_groups.append(group['id']) + instance = dict(launch_index=num, **base_options) + instance = self.db.instance_create(context, instance) + instance_id = instance['id'] + for security_group_id in security_groups: self.db.instance_add_security_group(elevated, instance_id, -- cgit From fdf076a04e001b897d01b2a8c4a9e3c980ea8f94 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 31 Aug 2011 21:34:10 -0700 Subject: fix for lp838583 - return instance_id for associated floating_ips, add test --- nova/api/openstack/contrib/floating_ips.py | 6 +++--- .../api/openstack/contrib/test_floating_ips.py | 21 +++++++++++++++++---- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 6ce531c8f..d1add8f83 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -36,9 +36,9 @@ def _translate_floating_ip_view(floating_ip): result['fixed_ip'] = floating_ip['fixed_ip']['address'] except (TypeError, KeyError): result['fixed_ip'] = None - if 'instance' in floating_ip: - result['instance_id'] = floating_ip['instance']['id'] - else: + try: + result['instance_id'] = floating_ip['fixed_ip']['instance_id'] + except (TypeError, KeyError): result['instance_id'] = None return {'floating_ip': result} diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index fc10f2f6c..642f2b841 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -38,14 +38,13 @@ def network_api_get_floating_ip(self, context, id): def network_api_get_floating_ip_by_ip(self, context, address): return {'id': 1, 'address': '10.10.10.10', - 'fixed_ip': {'address': '11.0.0.1'}} + 'fixed_ip': {'address': '10.0.0.1', 'instance_id': 1}}, def network_api_list_floating_ips(self, context): return [{'id': 1, 'address': '10.10.10.10', - 'instance': {'id': 11}, - 'fixed_ip': {'address': '10.0.0.1'}}, + 'fixed_ip': {'address': '10.0.0.1', 'instance_id': 1}}, {'id': 2, 'address': '10.10.10.11'}] @@ -152,7 +151,7 @@ class FloatingIpTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) res_dict = json.loads(res.body) - response = {'floating_ips': [{'instance_id': 11, + response = {'floating_ips': [{'instance_id': 1, 'ip': '10.10.10.10', 'fixed_ip': '10.0.0.1', 'id': 1}, @@ -171,6 +170,20 @@ class FloatingIpTest(test.TestCase): self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10') self.assertEqual(res_dict['floating_ip']['instance_id'], None) + def test_show_associated_floating_ip(self): + def get_floating_ip(self, context, id): + return {'id': 1, 'address': '10.10.10.10', + 'fixed_ip': {'address': '10.0.0.1', 'instance_id': 1}} + self.stubs.Set(network.api.API, "get_floating_ip", get_floating_ip) + + req = webob.Request.blank('/v1.1/123/os-floating-ips/1') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + self.assertEqual(res_dict['floating_ip']['id'], 1) + self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10') + 
self.assertEqual(res_dict['floating_ip']['instance_id'], 1) + def test_floating_ip_allocate_no_free_ips(self): def fake_call(*args, **kwargs): raise(rpc.RemoteError('NoMoreFloatingIps', '', '')) -- cgit From dd5eeafbfe1013fd9acdb119933cb5bf986706e6 Mon Sep 17 00:00:00 2001 From: Christopher MacGown Date: Thu, 1 Sep 2011 12:05:21 -0700 Subject: Adds test for image.glance.GlanceImageService._is_image_available --- nova/tests/image/test_glance.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 0ff508ffa..81a54346e 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -20,6 +20,7 @@ import datetime import unittest from nova import context +from nova import exception from nova import test from nova.image import glance @@ -96,6 +97,31 @@ class TestGlanceImageServiceProperties(BaseGlanceTest): 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}} self.assertEqual(image_meta, expected) + def test_show_raises_when_no_authtoken_in_the_context(self): + fixtures = {'image1': {'name': 'image1', 'is_public': False, + 'foo': 'bar', + 'properties': {'prop1': 'propvalue1'}}} + self.client.images = fixtures + self.context.auth_token = False + + expected = {'name': 'image1', 'is_public': True, + 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}} + self.assertRaises(exception.ImageNotFound, + self.service.show, self.context, 'image1') + + def test_show_passes_through_to_client_with_authtoken_in_context(self): + fixtures = {'image1': {'name': 'image1', 'is_public': False, + 'foo': 'bar', + 'properties': {'prop1': 'propvalue1'}}} + self.client.images = fixtures + self.context.auth_token = True + + expected = {'name': 'image1', 'is_public': False, + 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}} + + image_meta = self.service.show(self.context, 'image1') + self.assertEqual(image_meta, expected) + def test_detail_passes_through_to_client(self): fixtures = {'image1': {'name': 'image1', 'is_public': True, 'foo': 'bar', -- cgit From 59be9be68c0fd9b33b72257b8a1eb8c357ce9217 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 1 Sep 2011 12:22:32 -0700 Subject: remove extra references to state_description --- bin/nova-manage | 6 +++--- nova/api/ec2/admin.py | 5 ++--- nova/api/openstack/contrib/simple_tenant_usage.py | 2 +- nova/tests/test_libvirt.py | 10 ++++++---- 4 files changed, 12 insertions(+), 11 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index c9cf4266d..c3b2c71ce 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -166,7 +166,7 @@ class VpnCommands(object): print address, print vpn['host'], print ec2utils.id_to_ec2_id(vpn['id']), - print vpn['state_description'], + print vpn['vm_state'], print state else: print None @@ -869,7 +869,7 @@ class VmCommands(object): instance['hostname'], instance['host'], instance['instance_type'].name, - instance['state_description'], + instance['vm_state'], instance['launched_at'], instance['image_ref'], instance['kernel_id'], @@ -1223,7 +1223,7 @@ class VsaCommands(object): type=vc['instance_type']['name'], fl_ip=floating_addr, fx_ip=fixed_addr, - stat=vc['state_description'], + stat=vc['vm_state'], host=vc['host'], time=str(vc['created_at'])) diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index dfbbc0a2b..75e029509 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -21,7 +21,6 @@ Admin API controller, exposed through http via the api worker. 
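These state_description removals all follow one translation: the old free-form state_description strings map onto the vm_state column used elsewhere in this series. The admin.py hunk below, for instance, reduces to a one-line predicate; the function name here is hypothetical, the logic is the patch's:

    from nova.compute import vm_states

    def _vpn_is_alive(instance):
        """Old test: state_description not in ('shutting_down', 'shutdown');
        new test, per the hunk below: anything not DELETED counts."""
        return instance['vm_state'] not in [vm_states.DELETED]
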
""" import base64 -import datetime import netaddr import urllib @@ -33,6 +32,7 @@ from nova import log as logging from nova import utils from nova.api.ec2 import ec2utils from nova.auth import manager +from nova.compute import vm_states FLAGS = flags.FLAGS @@ -273,8 +273,7 @@ class AdminController(object): """Get the VPN instance for a project ID.""" for instance in db.instance_get_all_by_project(context, project_id): if (instance['image_id'] == str(FLAGS.vpn_image_id) - and not instance['state_description'] in - ['shutting_down', 'shutdown']): + and not instance['vm_state'] in [vm_states.DELETED]): return instance def start_vpn(self, context, project): diff --git a/nova/api/openstack/contrib/simple_tenant_usage.py b/nova/api/openstack/contrib/simple_tenant_usage.py index 69b38e229..42691a9fa 100644 --- a/nova/api/openstack/contrib/simple_tenant_usage.py +++ b/nova/api/openstack/contrib/simple_tenant_usage.py @@ -116,7 +116,7 @@ class SimpleTenantUsageController(object): if info['ended_at']: info['state'] = 'terminated' else: - info['state'] = instance['state_description'] + info['state'] = instance['vm_state'] now = datetime.utcnow() diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 6a213b4f0..8c6775b29 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -34,6 +34,7 @@ from nova import test from nova import utils from nova.api.ec2 import cloud from nova.compute import power_state +from nova.compute import vm_states from nova.virt.libvirt import connection from nova.virt.libvirt import firewall @@ -674,8 +675,9 @@ class LibvirtConnTestCase(test.TestCase): # Preparing data self.compute = utils.import_object(FLAGS.compute_manager) - instance_dict = {'host': 'fake', 'state': power_state.RUNNING, - 'state_description': 'running'} + instance_dict = {'host': 'fake', + 'power_state': power_state.RUNNING, + 'vm_state': vm_states.ACTIVE} instance_ref = db.instance_create(self.context, self.test_instance) instance_ref = db.instance_update(self.context, instance_ref['id'], instance_dict) @@ -713,8 +715,8 @@ class LibvirtConnTestCase(test.TestCase): self.compute.rollback_live_migration) instance_ref = db.instance_get(self.context, instance_ref['id']) - self.assertTrue(instance_ref['state_description'] == 'running') - self.assertTrue(instance_ref['state'] == power_state.RUNNING) + self.assertTrue(instance_ref['vm_state'] == vm_states.ACTIVE) + self.assertTrue(instance_ref['power_state'] == power_state.RUNNING) volume_ref = db.volume_get(self.context, volume_ref['id']) self.assertTrue(volume_ref['status'] == 'in-use') -- cgit From 43a392814150e49769e935f4972c9901612570af Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Thu, 1 Sep 2011 14:03:22 -0700 Subject: added unit tests and cleanup of import statements --- .../api/openstack/contrib/test_createserverext.py | 113 +++++++++++++++++---- 1 file changed, 95 insertions(+), 18 deletions(-) diff --git a/nova/tests/api/openstack/contrib/test_createserverext.py b/nova/tests/api/openstack/contrib/test_createserverext.py index e5eed14fe..739312399 100644 --- a/nova/tests/api/openstack/contrib/test_createserverext.py +++ b/nova/tests/api/openstack/contrib/test_createserverext.py @@ -15,7 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-import base64 +import datetime import json import unittest from xml.dom import minidom @@ -26,15 +26,7 @@ import webob from nova import exception from nova import flags from nova import test -from nova import utils import nova.api.openstack -from nova.api.openstack import servers -from nova.api.openstack.contrib import createserverext -import nova.compute.api - -import nova.scheduler.api -import nova.image.fake -import nova.rpc from nova.tests.api.openstack import fakes @@ -51,22 +43,41 @@ DUPLICATE_NETWORKS = [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12'), INVALID_NETWORKS = [('invalid', 'invalid-ip-address')] +INSTANCE = { + "id": 1, + "display_name": "test_server", + "uuid": FAKE_UUID, + "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), + "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), + "security_groups": [{"id": 1, "name": "test"}] + } + +def return_server_by_id(context, id, session=None): + INSTANCE['id'] = id + return INSTANCE + + +def return_security_group_non_existing(context, project_id, group_name): + raise exception.SecurityGroupNotFoundForProject(project_id=project_id, + security_group_id=group_name) + +def return_security_group_get_by_name(context, project_id, group_name): + return {'id': 1, 'name': group_name} + + +def return_security_group_get(context, security_group_id, session): + return {'id': security_group_id} + + +def return_instance_add_security_group(context, instance_id, security_group_id): + pass class CreateserverextTest(test.TestCase): def setUp(self): super(CreateserverextTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.auth_data = {} - fakes.FakeAuthDatabase.data = {} - fakes.stub_out_auth(self.stubs) - fakes.stub_out_image_service(self.stubs) - fakes.stub_out_key_pair_funcs(self.stubs) - self.allow_admin = FLAGS.allow_admin_api def tearDown(self): - self.stubs.UnsetAll() - FLAGS.allow_admin_api = self.allow_admin super(CreateserverextTest, self).tearDown() def _setup_mock_compute_api(self): @@ -87,6 +98,7 @@ class CreateserverextTest(test.TestCase): self.networks = kwargs['requested_networks'] else: self.networks = None + return [{'id': '1234', 'display_name': 'fakeinstance', 'uuid': FAKE_UUID, 'created_at': "", @@ -107,6 +119,18 @@ class CreateserverextTest(test.TestCase): '_get_kernel_ramdisk_from_image', make_stub_method((1, 1))) return compute_api + def _create_security_group_request_dict(self, security_groups): + server = {} + server['name'] = 'new-server-test' + server['imageRef'] = 1 + server['flavorRef'] = 1 + if security_groups is not None: + sg_list = [] + for name in security_groups: + sg_list.append({'name': name}) + server['security_groups'] = sg_list + return {'server': server} + def _create_networks_request_dict(self, networks): server = {} server['name'] = 'new-server-test' @@ -304,3 +328,56 @@ class CreateserverextTest(test.TestCase): self.assertEquals(response.status_int, 202) self.assertEquals(compute_api.networks, [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)]) + + def test_create_instance_with_security_group_empty_list_json(self): + security_groups = [] + body_dict = self._create_security_group_request_dict(security_groups) + request = self._get_create_request_json(body_dict) + response = request.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 202) + res_dict = json.loads(response.body) + self.assertEquals([{'name': "default"}], res_dict['server']['security_groups']) + + def test_create_instance_with_security_group_non_existing_json(self): + 
From 2d2d9a5f5caed27d9ade06b2dbc56b793b7e5d3b Mon Sep 17 00:00:00 2001
From: Tushar Patil
Date: Thu, 1 Sep 2011 14:32:48 -0700
Subject: Delete debug messages.

---
 nova/tests/api/openstack/contrib/test_createserverext.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/nova/tests/api/openstack/contrib/test_createserverext.py b/nova/tests/api/openstack/contrib/test_createserverext.py
index d6600d054..ba8fb925e 100644
--- a/nova/tests/api/openstack/contrib/test_createserverext.py
+++ b/nova/tests/api/openstack/contrib/test_createserverext.py
@@ -32,8 +32,6 @@ import nova.api.openstack

 from nova.tests.api.openstack import fakes

-from nova import log as logging
-LOG = logging.getLogger("api.nova.openstack.etere")

 FLAGS = flags.FLAGS
 FLAGS.verbose = True
@@ -388,7 +386,6 @@ class CreateserverextTest(test.TestCase):
         body_dict = self._create_security_group_request_dict(security_groups)
         request = self._get_create_request_json(body_dict)
         response = request.get_response(fakes.wsgi_app())
-        LOG.debug(response)
         self.assertEquals(response.status_int, 202)
-- cgit
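
The deleted logger was registered under the ad-hoc name "api.nova.openstack.etere", which sits outside the 'nova.*' hierarchy and so escapes the project's log filtering. If a test module genuinely needs one, naming the logger after the real module path keeps it controllable (stdlib sketch; the dotted name below is illustrative only):

    import logging

    # A hierarchical name means existing handlers and verbosity settings
    # for 'nova' apply to this logger automatically.
    LOG = logging.getLogger(
        'nova.tests.api.openstack.contrib.test_createserverext')
    LOG.debug('response status: %s', 202)
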
Mitchell" Date: Thu, 1 Sep 2011 17:15:21 -0500 Subject: import filters in scheduler/host_filter.py so default_host_filter gets added to FLAGS; rework SchedulerManager() to only catch missing 'schedule_' attribute and report other missing attributes --- nova/scheduler/host_filter.py | 1 + nova/scheduler/manager.py | 10 ++++++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 826a99b0a..4024ec854 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -31,6 +31,7 @@ import types from nova import exception from nova import flags import nova.scheduler +from nova.scheduler import filters FLAGS = flags.FLAGS diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 0e395ee79..92bb1ed6e 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -93,12 +93,14 @@ class SchedulerManager(manager.Manager): driver_method = 'schedule_%s' % method elevated = context.elevated() try: - host = getattr(self.driver, driver_method)(elevated, *args, - **kwargs) + real_meth = getattr(self.driver, driver_method) + args = (elevated,) + args except AttributeError, e: LOG.warning(_("Driver Method %(driver_method)s missing: %(e)s." - "Reverting to schedule()") % locals()) - host = self.driver.schedule(elevated, topic, *args, **kwargs) + "Reverting to schedule()") % locals()) + real_meth = self.driver.schedule + args = (elevated, topic) + args + real_meth(*args, **kwargs) if not host: LOG.debug(_("%(topic)s %(method)s handled in Scheduler") -- cgit From f119805aa7c8e2dd7f0bafe666d976f3a0c08795 Mon Sep 17 00:00:00 2001 From: "Kevin L. Mitchell" Date: Fri, 2 Sep 2011 11:00:03 -0500 Subject: Forgot to handle return value --- nova/scheduler/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 92bb1ed6e..bf18abc6c 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -100,7 +100,7 @@ class SchedulerManager(manager.Manager): "Reverting to schedule()") % locals()) real_meth = self.driver.schedule args = (elevated, topic) + args - real_meth(*args, **kwargs) + host = real_meth(*args, **kwargs) if not host: LOG.debug(_("%(topic)s %(method)s handled in Scheduler") -- cgit From 666f7152910838f866ca4b76258b025c27744ffb Mon Sep 17 00:00:00 2001 From: "Kevin L. Mitchell" Date: Fri, 2 Sep 2011 12:31:10 -0500 Subject: Add documentation comment --- nova/scheduler/host_filter.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 4024ec854..9f7d34ea7 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -31,6 +31,11 @@ import types from nova import exception from nova import flags import nova.scheduler + +# NOTE(Vek): Even though we don't use filters in here anywhere, we +# depend on default_host_filter being available in FLAGS, +# and that happens only when filters/abstract_filter.py is +# imported. 
From 666f7152910838f866ca4b76258b025c27744ffb Mon Sep 17 00:00:00 2001
From: "Kevin L. Mitchell"
Date: Fri, 2 Sep 2011 12:31:10 -0500
Subject: Add documentation comment.

---
 nova/scheduler/host_filter.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py
index 4024ec854..9f7d34ea7 100644
--- a/nova/scheduler/host_filter.py
+++ b/nova/scheduler/host_filter.py
@@ -31,6 +31,11 @@ import types
 from nova import exception
 from nova import flags
 import nova.scheduler
+
+# NOTE(Vek): Even though we don't use filters in here anywhere, we
+#            depend on default_host_filter being available in FLAGS,
+#            and that happens only when filters/abstract_filter.py is
+#            imported.
 from nova.scheduler import filters
-- cgit
From 494eb94192a971f64fa6aa78092074f8ed437a7f Mon Sep 17 00:00:00 2001
From: Tushar Patil
Date: Fri, 2 Sep 2011 17:09:09 -0700
Subject: Add unit tests verifying that no instance record is inserted into
 the database when the security groups passed to an instance do not exist.

---
 nova/tests/test_compute.py | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 766a7da9b..65fdffbd6 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -161,6 +161,19 @@ class ComputeTestCase(test.TestCase):
         db.security_group_destroy(self.context, group['id'])
         db.instance_destroy(self.context, ref[0]['id'])

+    def test_create_instance_with_invalid_security_group_raises(self):
+        instance_type = instance_types.get_default_instance_type()
+
+        pre_build_len = len(db.instance_get_all(context.get_admin_context()))
+        self.assertRaises(exception.SecurityGroupNotFoundForProject,
+                          self.compute_api.create,
+                          self.context,
+                          instance_type=instance_type,
+                          image_href=None,
+                          security_group=['this_is_a_fake_sec_group'])
+        self.assertEqual(pre_build_len,
+                len(db.instance_get_all(context.get_admin_context())))
+
     def test_create_instance_associates_config_drive(self):
         """Make sure create associates a config drive."""
-- cgit
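
The test added above pins down an ordering property: security groups are validated before the instance row is written, so a failed create leaves the instance table untouched. A generic, self-contained sketch of the same assertion pattern (the in-memory INSTANCES list and create_instance stand in for nova's database and compute API):

    import unittest

    INSTANCES = []
    KNOWN_GROUPS = set(['default'])

    def create_instance(security_groups):
        # Validate before inserting, so a failure needs no rollback.
        for name in security_groups:
            if name not in KNOWN_GROUPS:
                raise LookupError(name)
        INSTANCES.append({'security_groups': list(security_groups)})

    class CreateRollbackTestCase(unittest.TestCase):
        def test_invalid_group_creates_nothing(self):
            pre_build_len = len(INSTANCES)
            self.assertRaises(LookupError,
                              create_instance, ['no-such-group'])
            self.assertEqual(pre_build_len, len(INSTANCES))

    if __name__ == '__main__':
        unittest.main()
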