| author | Isaku Yamahata <yamahata@valinux.co.jp> | 2011-08-02 11:36:58 +0900 |
|---|---|---|
| committer | Isaku Yamahata <yamahata@valinux.co.jp> | 2011-08-02 11:36:58 +0900 |
| commit | 0ffc3b80490c24ee116f5be070249dcbbed9357b | |
| tree | 74b5a542119899a9b483288d2fa6a354751d4090 /nova/compute | |
| parent | a52b643b18e1bac18b642ecfd781809eb5612763 | |
| parent | bdcfaa5b927a096f507fb0f7e2d81989173957f8 | |
merged with nova trunk
Diffstat (limited to 'nova/compute')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | nova/compute/api.py | 14 |
| -rw-r--r-- | nova/compute/instance_types.py | 9 |
| -rw-r--r-- | nova/compute/manager.py | 81 |
3 files changed, 64 insertions, 40 deletions
```diff
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 942114161..91c7e2026 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -127,7 +127,7 @@ class API(base.Base):
         quota_metadata = quota.allowed_metadata_items(context, num_metadata)
         if quota_metadata < num_metadata:
             pid = context.project_id
-            msg = _("Quota exceeeded for %(pid)s, tried to set "
+            msg = _("Quota exceeded for %(pid)s, tried to set "
                     "%(num_metadata)s metadata properties") % locals()
             LOG.warn(msg)
             raise quota.QuotaError(msg, "MetadataLimitExceeded")
@@ -138,7 +138,7 @@ class API(base.Base):
         for k, v in metadata.iteritems():
             if len(k) > 255 or len(v) > 255:
                 pid = context.project_id
-                msg = _("Quota exceeeded for %(pid)s, metadata property "
+                msg = _("Quota exceeded for %(pid)s, metadata property "
                         "key or value too long") % locals()
                 LOG.warn(msg)
                 raise quota.QuotaError(msg, "MetadataLimitExceeded")
@@ -165,7 +165,7 @@ class API(base.Base):
                                                        instance_type)
         if num_instances < min_count:
             pid = context.project_id
-            LOG.warn(_("Quota exceeeded for %(pid)s,"
+            LOG.warn(_("Quota exceeded for %(pid)s,"
                     " tried to run %(min_count)s instances") % locals())
             if num_instances <= 0:
                 message = _("Instance quota exceeded. You cannot run any "
@@ -600,6 +600,7 @@ class API(base.Base):
                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
                 {'method': 'refresh_provider_fw_rules', 'args': {}})
 
+    @scheduler_api.reroute_compute("update")
     def update(self, context, instance_id, **kwargs):
         """Updates the instance in the datastore.
 
@@ -727,7 +728,7 @@ class API(base.Base):
                     raise
                 instances = None
         elif project_id or not context.is_admin:
-            if not context.project:
+            if not context.project_id:
                 instances = self.db.instance_get_all_by_user(
                     context, context.user_id)
             else:
@@ -815,6 +816,7 @@ class API(base.Base):
         raise exception.Error(_("Unable to find host for Instance %s")
                                 % instance_id)
 
+    @scheduler_api.reroute_compute("backup")
     def backup(self, context, instance_id, name, backup_type, rotation,
                extra_properties=None):
         """Backup the given instance
@@ -831,6 +833,7 @@ class API(base.Base):
                                      extra_properties=extra_properties)
         return recv_meta
 
+    @scheduler_api.reroute_compute("snapshot")
     def snapshot(self, context, instance_id, name, extra_properties=None):
         """Snapshot the given instance.
 
@@ -873,10 +876,12 @@ class API(base.Base):
                                           params=params)
         return recv_meta
 
+    @scheduler_api.reroute_compute("reboot")
     def reboot(self, context, instance_id):
         """Reboot the given instance."""
         self._cast_compute_message('reboot_instance', context, instance_id)
 
+    @scheduler_api.reroute_compute("rebuild")
     def rebuild(self, context, instance_id, image_href, name=None,
             metadata=None, files_to_inject=None):
         """Rebuild the given instance with the provided metadata."""
@@ -1063,6 +1068,7 @@ class API(base.Base):
         """Unrescue the given instance."""
         self._cast_compute_message('unrescue_instance', context, instance_id)
 
+    @scheduler_api.reroute_compute("set_admin_password")
     def set_admin_password(self, context, instance_id, password=None):
         """Set the root/admin password for the given instance."""
         host = self._find_host(context, instance_id)
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index c13a629a9..824416514 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -132,11 +132,8 @@ def get_instance_type_by_name(name):
 # flavors.
 def get_instance_type_by_flavor_id(flavor_id):
     """Retrieve instance type by flavor_id."""
-    if flavor_id is None:
-        return get_default_instance_type()
+    ctxt = context.get_admin_context()
     try:
-        ctxt = context.get_admin_context()
         return db.instance_type_get_by_flavor_id(ctxt, flavor_id)
-    except exception.DBError, e:
-        LOG.exception(_('DB error: %s') % e)
-        raise exception.ApiError(_("Unknown flavor: %s") % flavor_id)
+    except ValueError:
+        raise exception.FlavorNotFound(flavor_id=flavor_id)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index a4d2797d6..71febf7c5 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -215,6 +215,15 @@ class ComputeManager(manager.SchedulerDependentManager):
         """This call passes straight through to the virtualization driver."""
         return self.driver.refresh_provider_fw_rules()
 
+    def _get_instance_nw_info(self, context, instance):
+        """Get a list of dictionaries of network data of an instance.
+        Returns an empty list if stub_network flag is set."""
+        network_info = []
+        if not FLAGS.stub_network:
+            network_info = self.network_api.get_instance_nw_info(context,
+                                                                 instance)
+        return network_info
+
     def _setup_block_device_mapping(self, context, instance_id):
         """setup volumes for block device mapping"""
         self.db.instance_set_state(context,
@@ -314,8 +323,6 @@ class ComputeManager(manager.SchedulerDependentManager):
             network_info = self.network_api.allocate_for_instance(context,
                                                 instance, vpn=is_vpn)
             LOG.debug(_("instance network_info: |%s|"), network_info)
-            self.network_manager.setup_compute_network(context,
-                                                       instance_id)
         else:
             # TODO(tr3buchet) not really sure how this should be handled.
             # virt requires network_info to be passed in but stub_network
@@ -376,6 +383,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                 {'action_str': action_str, 'instance_id': instance_id},
                 context=context)
 
+        network_info = self._get_instance_nw_info(context, instance)
         if not FLAGS.stub_network:
             self.network_api.deallocate_for_instance(context, instance)
 
@@ -388,7 +396,7 @@ class ComputeManager(manager.SchedulerDependentManager):
             self.db.instance_destroy(context, instance_id)
             raise exception.Error(_('trying to destroy already destroyed'
                                     ' instance: %s') % instance_id)
-        self.driver.destroy(instance)
+        self.driver.destroy(instance, network_info)
 
         if action_str == 'Terminating':
             terminate_volumes(self.db, context, instance_id)
@@ -433,11 +441,16 @@ class ComputeManager(manager.SchedulerDependentManager):
         self._update_state(context, instance_id, power_state.BUILDING)
 
-        self.driver.destroy(instance_ref)
+        network_info = self._get_instance_nw_info(context, instance_ref)
+
+        self.driver.destroy(instance_ref, network_info)
         image_ref = kwargs.get('image_ref')
         instance_ref.image_ref = image_ref
         instance_ref.injected_files = kwargs.get('injected_files', [])
-        self.driver.spawn(instance_ref)
+        network_info = self.network_api.get_instance_nw_info(context,
+                                                             instance_ref)
+        bd_mapping = self._setup_block_device_mapping(context, instance_id)
+        self.driver.spawn(instance_ref, network_info, bd_mapping)
 
         self._update_image_ref(context, instance_id, image_ref)
         self._update_launched_at(context, instance_id)
@@ -470,8 +483,8 @@ class ComputeManager(manager.SchedulerDependentManager):
                                    instance_id,
                                    power_state.NOSTATE,
                                    'rebooting')
-        self.network_manager.setup_compute_network(context, instance_id)
-        self.driver.reboot(instance_ref)
+        network_info = self._get_instance_nw_info(context, instance_ref)
+        self.driver.reboot(instance_ref, network_info)
         self._update_state(context, instance_id)
 
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -661,10 +674,10 @@ class ComputeManager(manager.SchedulerDependentManager):
                                    instance_id,
                                    power_state.NOSTATE,
                                    'rescuing')
-        self.network_manager.setup_compute_network(context, instance_id)
         _update_state = lambda result: self._update_state_callback(
             self, context, instance_id, result)
-        self.driver.rescue(instance_ref, _update_state)
+        network_info = self._get_instance_nw_info(context, instance_ref)
+        self.driver.rescue(instance_ref, _update_state, network_info)
         self._update_state(context, instance_id)
 
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -680,7 +693,8 @@ class ComputeManager(manager.SchedulerDependentManager):
                                    'unrescuing')
         _update_state = lambda result: self._update_state_callback(
             self, context, instance_id, result)
-        self.driver.unrescue(instance_ref, _update_state)
+        network_info = self._get_instance_nw_info(context, instance_ref)
+        self.driver.unrescue(instance_ref, _update_state, network_info)
         self._update_state(context, instance_id)
 
     @staticmethod
@@ -696,7 +710,8 @@ class ComputeManager(manager.SchedulerDependentManager):
         instance_ref = self.db.instance_get_by_uuid(context,
                 migration_ref.instance_uuid)
 
-        self.driver.destroy(instance_ref)
+        network_info = self._get_instance_nw_info(context, instance_ref)
+        self.driver.destroy(instance_ref, network_info)
         usage_info = utils.usage_from_instance(instance_ref)
         notifier.notify('compute.%s' % self.host,
                             'compute.instance.resize.confirm',
@@ -716,7 +731,8 @@ class ComputeManager(manager.SchedulerDependentManager):
         instance_ref = self.db.instance_get_by_uuid(context,
                 migration_ref.instance_uuid)
 
-        self.driver.destroy(instance_ref)
+        network_info = self._get_instance_nw_info(context, instance_ref)
+        self.driver.destroy(instance_ref, network_info)
         topic = self.db.queue_get_for(context, FLAGS.compute_topic,
                 instance_ref['host'])
         rpc.cast(context, topic,
@@ -748,7 +764,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                    local_gb=instance_type['local_gb'],
                    instance_type_id=instance_type['id']))
 
-        self.driver.revert_resize(instance_ref)
+        self.driver.revert_migration(instance_ref)
         self.db.migration_update(context, migration_id,
                 {'status': 'reverted'})
         usage_info = utils.usage_from_instance(instance_ref)
@@ -846,21 +862,26 @@ class ComputeManager(manager.SchedulerDependentManager):
         """
         migration_ref = self.db.migration_get(context, migration_id)
+
+        resize_instance = False
         instance_ref = self.db.instance_get_by_uuid(context,
                 migration_ref.instance_uuid)
-        instance_type = self.db.instance_type_get_by_flavor_id(context,
-                migration_ref['new_flavor_id'])
-        self.db.instance_update(context, instance_ref.uuid,
-               dict(instance_type_id=instance_type['id'],
-                    memory_mb=instance_type['memory_mb'],
-                    vcpus=instance_type['vcpus'],
-                    local_gb=instance_type['local_gb']))
+        if migration_ref['old_flavor_id'] != migration_ref['new_flavor_id']:
+            instance_type = self.db.instance_type_get_by_flavor_id(context,
+                    migration_ref['new_flavor_id'])
+            self.db.instance_update(context, instance_ref.uuid,
+                   dict(instance_type_id=instance_type['id'],
+                        memory_mb=instance_type['memory_mb'],
+                        vcpus=instance_type['vcpus'],
+                        local_gb=instance_type['local_gb']))
+            resize_instance = True
 
         instance_ref = self.db.instance_get_by_uuid(context,
                                                     instance_ref.uuid)
-        network_info = self.network_api.get_instance_nw_info(context,
-                                                             instance_ref)
-        self.driver.finish_resize(instance_ref, disk_info, network_info)
+
+        network_info = self._get_instance_nw_info(context, instance_ref)
+        self.driver.finish_migration(instance_ref, disk_info, network_info,
+                                     resize_instance)
         self.db.migration_update(context, migration_id,
                 {'status': 'finished', })
@@ -873,7 +894,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         """
         self.network_api.add_fixed_ip_to_instance(context,
                                                   instance_id,
-                                                  network_id)
+                                                  self.host, network_id)
 
         self.inject_network_info(context, instance_id)
         self.reset_network(context, instance_id)
@@ -1013,8 +1034,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         LOG.debug(_('instance %s: inject network info'), instance_id,
                                                          context=context)
         instance = self.db.instance_get(context, instance_id)
-        network_info = self.network_api.get_instance_nw_info(context,
-                                                             instance)
+        network_info = self._get_instance_nw_info(context, instance)
         LOG.debug(_("network_info to inject: |%s|"), network_info)
 
         self.driver.inject_network_info(instance, network_info)
@@ -1232,17 +1252,17 @@ class ComputeManager(manager.SchedulerDependentManager):
         #
         # Retry operation is necessary because continuously request comes,
         # concorrent request occurs to iptables, then it complains.
+        network_info = self._get_instance_nw_info(context, instance_ref)
         max_retry = FLAGS.live_migration_retry_count
         for cnt in range(max_retry):
             try:
-                self.network_manager.setup_compute_network(context,
-                                                           instance_id)
+                self.driver.plug_vifs(instance_ref, network_info)
                 break
             except exception.ProcessExecutionError:
                 if cnt == max_retry - 1:
                     raise
                 else:
-                    LOG.warn(_("setup_compute_network() failed %(cnt)d."
+                    LOG.warn(_("plug_vifs() failed %(cnt)d."
                                "Retry up to %(max_retry)d for %(hostname)s.")
                                % locals())
                     time.sleep(1)
@@ -1320,8 +1340,9 @@ class ComputeManager(manager.SchedulerDependentManager):
         # Releasing vlan.
         # (not necessary in current implementation?)
 
+        network_info = self._get_instance_nw_info(ctxt, instance_ref)
         # Releasing security group ingress rule.
-        self.driver.unfilter_instance(instance_ref)
+        self.driver.unfilter_instance(instance_ref, network_info)
 
         # Database updating.
         i_name = instance_ref.name
```
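The common thread through the manager.py hunks is that the compute manager now resolves an instance's network_info once, via the new `_get_instance_nw_info()` helper, and passes it explicitly to every virt-driver call (`destroy`, `reboot`, `rescue`, `spawn`, `unfilter_instance`, ...) in place of the removed `setup_compute_network()`. A minimal sketch of that pattern, where `FakeFlags`, `FakeNetworkAPI`, and `FakeDriver` are hypothetical stand-ins for Nova's real FLAGS object, network API, and virt driver:

```python
# Sketch only: the Fake* classes are stand-ins, not Nova's real objects.

class FakeFlags(object):
    stub_network = False          # mirrors FLAGS.stub_network

FLAGS = FakeFlags()

class FakeNetworkAPI(object):
    def get_instance_nw_info(self, context, instance):
        # Nova returns a list of (network, info) pairs; fake a single one.
        return [({'bridge': 'br100'}, {'ips': ['10.0.0.2']})]

class FakeDriver(object):
    # After this change, driver entry points take network_info explicitly.
    def reboot(self, instance, network_info):
        print('reboot %s with %d network(s)' % (instance['id'],
                                                len(network_info)))

class ComputeManagerSketch(object):
    def __init__(self):
        self.network_api = FakeNetworkAPI()
        self.driver = FakeDriver()

    def _get_instance_nw_info(self, context, instance):
        """Return the instance's network info, or [] when networking is
        stubbed out -- the helper added in the diff above."""
        if FLAGS.stub_network:
            return []
        return self.network_api.get_instance_nw_info(context, instance)

    def reboot_instance(self, context, instance):
        # Fetch once, pass through -- replaces setup_compute_network().
        network_info = self._get_instance_nw_info(context, instance)
        self.driver.reboot(instance, network_info)

if __name__ == '__main__':
    ComputeManagerSketch().reboot_instance(None, {'id': 'i-0001'})
```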
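The instance_types.py hunk narrows the error handling: an unknown flavor_id now raises the typed `FlavorNotFound` instead of a generic `ApiError`, and the silent fallback to the default instance type for a `None` flavor_id is gone. A hedged sketch of the new control flow, where `FlavorNotFound` and `FakeDB` are stand-ins for `nova.exception.FlavorNotFound` and `nova.db`:

```python
# Sketch only: FlavorNotFound and FakeDB stand in for Nova's real classes.

class FlavorNotFound(Exception):
    def __init__(self, flavor_id):
        super(FlavorNotFound, self).__init__(
            'flavor %s could not be found' % flavor_id)

class FakeDB(object):
    _flavors = {1: {'name': 'm1.tiny', 'memory_mb': 512}}

    def instance_type_get_by_flavor_id(self, ctxt, flavor_id):
        # The fake DB layer signals a bad id with ValueError, which the
        # compute-level helper below converts into a typed exception.
        try:
            return self._flavors[int(flavor_id)]
        except (KeyError, TypeError):
            raise ValueError(flavor_id)

db = FakeDB()

def get_instance_type_by_flavor_id(flavor_id):
    """After the diff: no default-type fallback, typed error on failure."""
    try:
        return db.instance_type_get_by_flavor_id(None, flavor_id)
    except ValueError:
        raise FlavorNotFound(flavor_id=flavor_id)

print(get_instance_type_by_flavor_id(1))   # the m1.tiny dict
# get_instance_type_by_flavor_id(99)       # would raise FlavorNotFound
```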
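The live-migration hunk keeps the bounded retry loop (concurrent iptables updates can fail transiently) but now retries `driver.plug_vifs()` instead of `setup_compute_network()`. The shape of that loop, reduced to a standalone sketch -- `ProcessExecutionError` is a placeholder for `nova.exception.ProcessExecutionError`, and `max_retry` stands in for `FLAGS.live_migration_retry_count`:

```python
import time

class ProcessExecutionError(Exception):
    """Placeholder for nova.exception.ProcessExecutionError."""

def plug_vifs_with_retry(driver, instance_ref, network_info, max_retry=3):
    # plug_vifs() can fail transiently when concurrent requests hit
    # iptables, so retry a bounded number of times before giving up.
    for cnt in range(max_retry):
        try:
            driver.plug_vifs(instance_ref, network_info)
            break
        except ProcessExecutionError:
            if cnt == max_retry - 1:
                raise
            time.sleep(1)
```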
