| author | Trey Morris <trey.morris@rackspace.com> | 2011-06-20 11:23:49 -0500 |
|---|---|---|
| committer | Trey Morris <trey.morris@rackspace.com> | 2011-06-20 11:23:49 -0500 |
| commit | 5f8637007e8f8bdb4f27150c2e4d95f031899301 | |
| tree | 5de6a75045427237ba471d91bdfc23e69f997df7 | /nova/compute |
| parent | 9206e7acd9ec1c6ff3a71c826b8ee26c108d3d3e | |
| parent | c2a8d0f1e2e9a25465100128bae4f60b532d16f5 | |
another trunk merge
Diffstat (limited to 'nova/compute')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | nova/compute/api.py | 103 |
| -rw-r--r-- | nova/compute/manager.py | 145 |
| -rw-r--r-- | nova/compute/utils.py | 29 |
3 files changed, 251 insertions, 26 deletions
```diff
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 6c31a9697..1d0b9962e 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -34,6 +34,7 @@
 from nova import utils
 from nova import volume
 from nova.compute import instance_types
 from nova.compute import power_state
+from nova.compute.utils import terminate_volumes
 from nova.scheduler import api as scheduler_api
 from nova.db import base
@@ -52,6 +53,18 @@ def generate_default_hostname(instance_id):
     return str(instance_id)
 
 
+def _is_able_to_shutdown(instance, instance_id):
+    states = {'terminating': "Instance %s is already being terminated",
+              'migrating': "Instance %s is being migrated",
+              'stopping': "Instance %s is being stopped"}
+    msg = states.get(instance['state_description'])
+    if msg:
+        LOG.warning(_(msg), instance_id)
+        return False
+
+    return True
+
+
 class API(base.Base):
     """API for interacting with the compute manager."""
@@ -218,7 +231,7 @@ class API(base.Base):
         return (num_instances, base_options, security_groups)
 
     def create_db_entry_for_new_instance(self, context, base_options,
-            security_groups, num=1):
+            security_groups, block_device_mapping, num=1):
         """Create an entry in the DB for this new instance,
         including any related table updates (such as security groups,
         MAC address, etc). This will be called by create()
@@ -237,6 +250,23 @@ class API(base.Base):
                                            instance_id,
                                            security_group_id)
 
+        # NOTE(yamahata)
+        # tell vm driver to attach volume at boot time by updating
+        # BlockDeviceMapping
+        for bdm in block_device_mapping:
+            LOG.debug(_('bdm %s'), bdm)
+            assert 'device_name' in bdm
+            values = {
+                'instance_id': instance_id,
+                'device_name': bdm['device_name'],
+                'delete_on_termination': bdm.get('delete_on_termination'),
+                'virtual_name': bdm.get('virtual_name'),
+                'snapshot_id': bdm.get('snapshot_id'),
+                'volume_id': bdm.get('volume_id'),
+                'volume_size': bdm.get('volume_size'),
+                'no_device': bdm.get('no_device')}
+            self.db.block_device_mapping_create(elevated, values)
+
         # Set sane defaults if not specified
         updates = dict(hostname=self.hostname_factory(instance_id))
         if (not hasattr(instance, 'display_name') or
@@ -321,7 +351,7 @@ class API(base.Base):
                key_name=None, key_data=None, security_group='default',
                availability_zone=None, user_data=None, metadata={},
                injected_files=None, admin_password=None, zone_blob=None,
-               reservation_id=None):
+               reservation_id=None, block_device_mapping=None):
         """
         Provision the instances by sending off a series of single
         instance requests to the Schedulers. This is fine for trivial
@@ -342,11 +372,13 @@ class API(base.Base):
                 injected_files, admin_password, zone_blob,
                 reservation_id)
 
+        block_device_mapping = block_device_mapping or []
         instances = []
         LOG.debug(_("Going to run %s instances..."), num_instances)
         for num in range(num_instances):
             instance = self.create_db_entry_for_new_instance(context,
-                    base_options, security_groups, num=num)
+                    base_options, security_groups,
+                    block_device_mapping, num=num)
             instances.append(instance)
             instance_id = instance['id']
@@ -456,24 +488,22 @@ class API(base.Base):
         rv = self.db.instance_update(context, instance_id, kwargs)
         return dict(rv.iteritems())
 
-    @scheduler_api.reroute_compute("delete")
-    def delete(self, context, instance_id):
-        """Terminate an instance."""
-        LOG.debug(_("Going to try to terminate %s"), instance_id)
+    def _get_instance(self, context, instance_id, action_str):
         try:
-            instance = self.get(context, instance_id)
+            return self.get(context, instance_id)
         except exception.NotFound:
-            LOG.warning(_("Instance %s was not found during terminate"),
-                        instance_id)
+            LOG.warning(_("Instance %(instance_id)s was not found during "
+                          "%(action_str)s") %
+                        {'instance_id': instance_id,
+                         'action_str': action_str})
             raise
 
-        if instance['state_description'] == 'terminating':
-            LOG.warning(_("Instance %s is already being terminated"),
-                        instance_id)
-            return
+    @scheduler_api.reroute_compute("delete")
+    def delete(self, context, instance_id):
+        """Terminate an instance."""
+        LOG.debug(_("Going to try to terminate %s"), instance_id)
+        instance = self._get_instance(context, instance_id, 'terminating')
 
-        if instance['state_description'] == 'migrating':
-            LOG.warning(_("Instance %s is being migrated"), instance_id)
+        if not _is_able_to_shutdown(instance, instance_id):
             return
 
         self.update(context,
@@ -487,8 +517,48 @@ class API(base.Base):
             self._cast_compute_message('terminate_instance', context,
                                        instance_id, host)
         else:
+            terminate_volumes(self.db, context, instance_id)
             self.db.instance_destroy(context, instance_id)
 
+    @scheduler_api.reroute_compute("stop")
+    def stop(self, context, instance_id):
+        """Stop an instance."""
+        LOG.debug(_("Going to try to stop %s"), instance_id)
+
+        instance = self._get_instance(context, instance_id, 'stopping')
+        if not _is_able_to_shutdown(instance, instance_id):
+            return
+
+        self.update(context,
+                    instance['id'],
+                    state_description='stopping',
+                    state=power_state.NOSTATE,
+                    terminated_at=utils.utcnow())
+
+        host = instance['host']
+        if host:
+            self._cast_compute_message('stop_instance', context,
+                                       instance_id, host)
+
+    def start(self, context, instance_id):
+        """Start an instance."""
+        LOG.debug(_("Going to try to start %s"), instance_id)
+        instance = self._get_instance(context, instance_id, 'starting')
+        if instance['state_description'] != 'stopped':
+            _state_description = instance['state_description']
+            LOG.warning(_("Instance %(instance_id)s is not "
+                          "stopped (%(_state_description)s)") % locals())
+            return
+
+        # TODO(yamahata): injected_files isn't supported right now.
+        # It is used only by the OpenStack API, not the EC2 API.
+        # availability_zone isn't used by run_instance.
+        rpc.cast(context,
+                 FLAGS.scheduler_topic,
+                 {"method": "start_instance",
+                  "args": {"topic": FLAGS.compute_topic,
+                           "instance_id": instance_id}})
+
     def get(self, context, instance_id):
         """Get a single instance with the given instance_id."""
         rv = self.db.instance_get(context, instance_id)
@@ -508,6 +578,7 @@ class API(base.Base):
         """Get all instances with this reservation_id, across
         all available Zones (if any).
""" + context = context.elevated() instances = self.db.instance_get_all_by_reservation( context, reservation_id) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 2ad0c0d04..28c32f086 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -53,6 +53,7 @@ from nova import rpc from nova import utils from nova import volume from nova.compute import power_state +from nova.compute.utils import terminate_volumes from nova.virt import driver @@ -200,8 +201,63 @@ class ComputeManager(manager.SchedulerDependentManager): """ return self.driver.refresh_security_group_members(security_group_id) - @exception.wrap_exception - def run_instance(self, context, instance_id, **kwargs): + def _setup_block_device_mapping(self, context, instance_id): + """setup volumes for block device mapping""" + self.db.instance_set_state(context, + instance_id, + power_state.NOSTATE, + 'block_device_mapping') + + volume_api = volume.API() + block_device_mapping = [] + for bdm in self.db.block_device_mapping_get_all_by_instance( + context, instance_id): + LOG.debug(_("setting up bdm %s"), bdm) + if ((bdm['snapshot_id'] is not None) and + (bdm['volume_id'] is None)): + # TODO(yamahata): default name and description + vol = volume_api.create(context, bdm['volume_size'], + bdm['snapshot_id'], '', '') + # TODO(yamahata): creating volume simultaneously + # reduces creation time? + volume_api.wait_creation(context, vol['id']) + self.db.block_device_mapping_update( + context, bdm['id'], {'volume_id': vol['id']}) + bdm['volume_id'] = vol['id'] + + if not ((bdm['snapshot_id'] is None) or + (bdm['volume_id'] is not None)): + LOG.error(_('corrupted state of block device mapping ' + 'id: %(id)s ' + 'snapshot: %(snapshot_id) volume: %(vollume_id)') % + {'id': bdm['id'], + 'snapshot_id': bdm['snapshot'], + 'volume_id': bdm['volume_id']}) + raise exception.ApiError(_('broken block device mapping %d') % + bdm['id']) + + if bdm['volume_id'] is not None: + volume_api.check_attach(context, + volume_id=bdm['volume_id']) + dev_path = self._attach_volume_boot(context, instance_id, + bdm['volume_id'], + bdm['device_name']) + block_device_mapping.append({'device_path': dev_path, + 'mount_device': + bdm['device_name']}) + elif bdm['virtual_name'] is not None: + # TODO(yamahata): ephemeral/swap device support + LOG.debug(_('block_device_mapping: ' + 'ephemeral device is not supported yet')) + else: + # TODO(yamahata): NoDevice support + assert bdm['no_device'] + LOG.debug(_('block_device_mapping: ' + 'no device is not supported yet')) + + return block_device_mapping + + def _run_instance(self, context, instance_id, **kwargs): """Launch a new instance with specified options.""" context = context.elevated() instance = self.db.instance_get(context, instance_id) @@ -238,11 +294,14 @@ class ComputeManager(manager.SchedulerDependentManager): # all vif creation and network injection, maybe this is correct network_info = [] + block_device_mapping = self._setup_block_device_mapping(context, + instance_id) + # TODO(vish) check to make sure the availability zone matches self._update_state(context, instance_id, power_state.BUILDING) try: - self.driver.spawn(instance, network_info) + self.driver.spawn(instance_ref, network_info, block_device_mapping) except Exception as ex: # pylint: disable=W0702 msg = _("Instance '%(instance_id)s' failed to spawn. Is " "virtualization enabled in the BIOS? 
Details: " @@ -253,30 +312,60 @@ class ComputeManager(manager.SchedulerDependentManager): self._update_state(context, instance_id) @exception.wrap_exception + def run_instance(self, context, instance_id, **kwargs): + self._run_instance(context, instance_id, **kwargs) + + @exception.wrap_exception @checks_instance_lock - def terminate_instance(self, context, instance_id): - """Terminate an instance on this host.""" + def start_instance(self, context, instance_id): + """Starting an instance on this host.""" + # TODO(yamahata): injected_files isn't supported. + # Anyway OSAPI doesn't support stop/start yet + self._run_instance(context, instance_id) + + def _shutdown_instance(self, context, instance_id, action_str): + """Shutdown an instance on this host.""" context = context.elevated() instance = self.db.instance_get(context, instance_id) - LOG.audit(_("Terminating instance %s"), instance_id, context=context) + LOG.audit(_("%(action_str)s instance %(instance_id)s") % + {'action_str': action_str, 'instance_id': instance_id}, + context=context) if not FLAGS.stub_network: self.network_api.deallocate_for_instance(context, instance) volumes = instance.get('volumes') or [] for volume in volumes: - self.detach_volume(context, instance_id, volume['id']) - if instance['state'] == power_state.SHUTOFF: + self._detach_volume(context, instance_id, volume['id'], False) + + if (instance['state'] == power_state.SHUTOFF and + instance['state_description'] != 'stopped'): self.db.instance_destroy(context, instance_id) raise exception.Error(_('trying to destroy already destroyed' ' instance: %s') % instance_id) self.driver.destroy(instance) + if action_str == 'Terminating': + terminate_volumes(self.db, context, instance_id) + + @exception.wrap_exception + @checks_instance_lock + def terminate_instance(self, context, instance_id): + """Terminate an instance on this host.""" + self._shutdown_instance(context, instance_id, 'Terminating') + # TODO(ja): should we keep it in a terminated state for a bit? self.db.instance_destroy(context, instance_id) @exception.wrap_exception @checks_instance_lock + def stop_instance(self, context, instance_id): + """Stopping an instance on this host.""" + self._shutdown_instance(context, instance_id, 'Stopping') + # instance state will be updated to stopped by _poll_instance_states() + + @exception.wrap_exception + @checks_instance_lock def rebuild_instance(self, context, instance_id, **kwargs): """Destroy and re-make this instance. @@ -763,6 +852,22 @@ class ComputeManager(manager.SchedulerDependentManager): instance_ref = self.db.instance_get(context, instance_id) return self.driver.get_vnc_console(instance_ref) + def _attach_volume_boot(self, context, instance_id, volume_id, mountpoint): + """Attach a volume to an instance at boot time. So actual attach + is done by instance creation""" + + # TODO(yamahata): + # should move check_attach to volume manager? 
+        volume.API().check_attach(context, volume_id)
+
+        context = context.elevated()
+        LOG.audit(_("instance %(instance_id)s: booting with "
+                    "volume %(volume_id)s at %(mountpoint)s") %
+                  locals(), context=context)
+        dev_path = self.volume_manager.setup_compute_volume(context,
+                                                            volume_id)
+        self.db.volume_attached(context, volume_id, instance_id, mountpoint)
+        return dev_path
+
     @checks_instance_lock
     def attach_volume(self, context, instance_id, volume_id, mountpoint):
         """Attach a volume to an instance."""
@@ -780,6 +885,16 @@ class ComputeManager(manager.SchedulerDependentManager):
                                     volume_id,
                                     instance_id,
                                     mountpoint)
+            values = {
+                'instance_id': instance_id,
+                'device_name': mountpoint,
+                'delete_on_termination': False,
+                'virtual_name': None,
+                'snapshot_id': None,
+                'volume_id': volume_id,
+                'volume_size': None,
+                'no_device': None}
+            self.db.block_device_mapping_create(context, values)
         except Exception as exc:  # pylint: disable=W0702
             # NOTE(vish): The inline callback eats the exception info so we
             # log the traceback here and reraise the same
@@ -794,7 +909,7 @@ class ComputeManager(manager.SchedulerDependentManager):
 
     @exception.wrap_exception
     @checks_instance_lock
-    def detach_volume(self, context, instance_id, volume_id):
+    def _detach_volume(self, context, instance_id, volume_id, destroy_bdm):
         """Detach a volume from an instance."""
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
@@ -810,8 +925,15 @@ class ComputeManager(manager.SchedulerDependentManager):
                                                 volume_ref['mountpoint'])
         self.volume_manager.remove_compute_volume(context, volume_id)
         self.db.volume_detached(context, volume_id)
+        if destroy_bdm:
+            self.db.block_device_mapping_destroy_by_instance_and_volume(
+                context, instance_id, volume_id)
         return True
 
+    def detach_volume(self, context, instance_id, volume_id):
+        """Detach a volume from an instance."""
+        return self._detach_volume(context, instance_id, volume_id, True)
+
     def remove_volume(self, context, volume_id):
         """Remove volume on compute host.
@@ -1138,11 +1260,14 @@ class ComputeManager(manager.SchedulerDependentManager):
                                 "State=%(db_state)s, so setting state to "
                                 "shutoff.") % locals())
                     vm_state = power_state.SHUTOFF
+                    if db_instance['state_description'] == 'stopping':
+                        self.db.instance_stop(context, db_instance['id'])
+                        continue
                 else:
                     vm_state = vm_instance.state
                     vms_not_found_in_db.remove(name)
 
-                if db_instance['state_description'] == 'migrating':
+                if (db_instance['state_description'] in
+                        ['migrating', 'stopping']):
                     # A situation where the db record exists but the
                     # instance does not sometimes occurs during
                     # live-migration at the src compute; this case
                     # should be ignored.
```
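For readability, here is a hedged, stand-alone reduction (not from the commit) of the per-entry branching in `_setup_block_device_mapping()`. The real method mutates db state and talks to the volume service; this sketch only records those steps as action strings.

```python
# Hedged sketch, not from the commit: the decision sequence
# _setup_block_device_mapping() applies to one BDM row.
def plan_bdm_actions(bdm):
    actions = []
    if bdm['snapshot_id'] is not None and bdm['volume_id'] is None:
        # volume_api.create() + wait_creation(); the real code also writes
        # the new volume_id back to the db row.
        actions.append('create volume from snapshot and wait')
        bdm = dict(bdm, volume_id='vol-from-snap')
    if not (bdm['snapshot_id'] is None or bdm['volume_id'] is not None):
        # guards against corrupted rows (snapshot set but no volume)
        raise ValueError('broken block device mapping %d' % bdm['id'])
    if bdm['volume_id'] is not None:
        actions.append('check_attach, attach at boot, hand device_path '
                       'to driver.spawn()')
    elif bdm['virtual_name'] is not None:
        actions.append('ephemeral/swap device: not supported yet')
    else:
        assert bdm['no_device']
        actions.append('no_device: not supported yet')
    return actions


print(plan_bdm_actions({'id': 1, 'snapshot_id': 'snap-0001',
                        'volume_id': None, 'virtual_name': None,
                        'no_device': None}))
# ['create volume from snapshot and wait',
#  'check_attach, attach at boot, hand device_path to driver.spawn()']
```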
```diff
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
new file mode 100644
index 000000000..c8cb9bab8
--- /dev/null
+++ b/nova/compute/utils.py
@@ -0,0 +1,29 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 VA Linux Systems Japan K.K.
+# Copyright (c) 2011 Isaku Yamahata
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import volume
+
+
+def terminate_volumes(db, context, instance_id):
+    """Delete volumes with delete_on_termination=True in the block
+    device mapping."""
+    volume_api = volume.API()
+    for bdm in db.block_device_mapping_get_all_by_instance(context,
+                                                           instance_id):
+        #LOG.debug(_("terminating bdm %s") % bdm)
+        if bdm['volume_id'] and bdm['delete_on_termination']:
+            volume_api.delete(context, bdm['volume_id'])
+        db.block_device_mapping_destroy(context, bdm['id'])
```
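Finally, a self-contained illustration of the cleanup contract `terminate_volumes()` provides: every BDM row of the instance is destroyed, but only volumes flagged `delete_on_termination` are deleted. `FakeDB` and `FakeVolumeAPI` are hypothetical stand-ins for nova's db layer and `volume.API()`, and `volume_api` is injected only to keep the sketch runnable.

```python
# Hedged sketch, not from the commit: exercises the terminate_volumes()
# loop against in-memory fakes.
class FakeVolumeAPI(object):
    def __init__(self):
        self.deleted = []

    def delete(self, context, volume_id):
        self.deleted.append(volume_id)


class FakeDB(object):
    def __init__(self, bdms):
        self.bdms = dict((b['id'], b) for b in bdms)

    def block_device_mapping_get_all_by_instance(self, context, instance_id):
        return [b for b in self.bdms.values()
                if b['instance_id'] == instance_id]

    def block_device_mapping_destroy(self, context, bdm_id):
        del self.bdms[bdm_id]


def terminate_volumes(db, context, instance_id, volume_api):
    # same loop as nova/compute/utils.py; volume_api is a parameter here
    # only so the sketch needs no nova imports
    for bdm in db.block_device_mapping_get_all_by_instance(context,
                                                           instance_id):
        if bdm['volume_id'] and bdm['delete_on_termination']:
            volume_api.delete(context, bdm['volume_id'])
        db.block_device_mapping_destroy(context, bdm['id'])


db = FakeDB([{'id': 1, 'instance_id': 42, 'volume_id': 'vol-1',
              'delete_on_termination': True},
             {'id': 2, 'instance_id': 42, 'volume_id': 'vol-2',
              'delete_on_termination': False}])
api = FakeVolumeAPI()
terminate_volumes(db, None, 42, api)
assert api.deleted == ['vol-1']  # only the flagged volume was deleted
assert db.bdms == {}             # but every BDM row is gone
```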
