| author | Ken Pepple <ken.pepple@gmail.com> | 2011-04-22 00:11:54 -0700 |
|---|---|---|
| committer | Ken Pepple <ken.pepple@gmail.com> | 2011-04-22 00:11:54 -0700 |
| commit | 1efd359644aecbd6262e334e184897b98ef6c9d3 | |
| tree | 7799ccc603aacafdc881a4bdaf86f5ce7579e68f /nova/compute | |
| parent | 63f5aa5484aa9d61f2ed79caae1c665230a56f35 | |
| parent | 1a814ba56a696ce796ab7707eacc2ee065c448e8 | |
merge trunk
Diffstat (limited to 'nova/compute')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | nova/compute/api.py | 55 |
| -rw-r--r-- | nova/compute/instance_types.py | 4 |
| -rw-r--r-- | nova/compute/manager.py | 17 |
| -rw-r--r-- | nova/compute/monitor.py | 4 |
4 files changed, 41 insertions, 39 deletions
diff --git a/nova/compute/api.py b/nova/compute/api.py
index e6146231c..264961fe3 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -102,19 +102,40 @@ class API(base.Base):
             if len(content) > content_limit:
                 raise quota.QuotaError(code="OnsetFileContentLimitExceeded")
 
+    def _check_metadata_properties_quota(self, context, metadata={}):
+        """Enforce quota limits on metadata properties."""
+        num_metadata = len(metadata)
+        quota_metadata = quota.allowed_metadata_items(context, num_metadata)
+        if quota_metadata < num_metadata:
+            pid = context.project_id
+            msg = _("Quota exceeeded for %(pid)s, tried to set "
+                    "%(num_metadata)s metadata properties") % locals()
+            LOG.warn(msg)
+            raise quota.QuotaError(msg, "MetadataLimitExceeded")
+
+        # Because metadata is stored in the DB, we hard-code the size limits
+        # In future, we may support more variable length strings, so we act
+        # as if this is quota-controlled for forwards compatibility
+        for k, v in metadata.iteritems():
+            if len(k) > 255 or len(v) > 255:
+                pid = context.project_id
+                msg = _("Quota exceeeded for %(pid)s, metadata property "
+                        "key or value too long") % locals()
+                LOG.warn(msg)
+                raise quota.QuotaError(msg, "MetadataLimitExceeded")
+
     def create(self, context, instance_type,
                image_id, kernel_id=None, ramdisk_id=None,
                min_count=1, max_count=1,
                display_name='', display_description='',
                key_name=None, key_data=None, security_group='default',
-               availability_zone=None, user_data=None, metadata=[],
+               availability_zone=None, user_data=None, metadata={},
                injected_files=None):
         """Create the number and type of instances requested.
 
         Verifies that quota and other arguments are valid.
 
         """
-
         if not instance_type:
             instance_type = instance_types.get_default_instance_type()
 
@@ -128,30 +149,7 @@ class API(base.Base):
                     "run %s more instances of this type.") % num_instances,
                     "InstanceLimitExceeded")
 
-        num_metadata = len(metadata)
-        quota_metadata = quota.allowed_metadata_items(context, num_metadata)
-        if quota_metadata < num_metadata:
-            pid = context.project_id
-            msg = (_("Quota exceeeded for %(pid)s,"
-                     " tried to set %(num_metadata)s metadata properties")
-                   % locals())
-            LOG.warn(msg)
-            raise quota.QuotaError(msg, "MetadataLimitExceeded")
-
-        # Because metadata is stored in the DB, we hard-code the size limits
-        # In future, we may support more variable length strings, so we act
-        # as if this is quota-controlled for forwards compatibility
-        for metadata_item in metadata:
-            k = metadata_item['key']
-            v = metadata_item['value']
-            if len(k) > 255 or len(v) > 255:
-                pid = context.project_id
-                msg = (_("Quota exceeeded for %(pid)s,"
-                         " metadata property key or value too long")
-                       % locals())
-                LOG.warn(msg)
-                raise quota.QuotaError(msg, "MetadataLimitExceeded")
-
+        self._check_metadata_properties_quota(context, metadata)
         self._check_injected_file_quota(context, injected_files)
 
         image = self.image_service.show(context, image_id)
@@ -239,7 +237,7 @@ class API(base.Base):
         # Set sane defaults if not specified
         updates = dict(hostname=self.hostname_factory(instance_id))
         if (not hasattr(instance, 'display_name') or
-                instance.display_name == None):
+                instance.display_name is None):
             updates['display_name'] = "Server %s" % instance_id
 
         instance = self.update(context, instance_id, **updates)
@@ -722,5 +720,8 @@ class API(base.Base):
     def update_or_create_instance_metadata(self, context, instance_id,
                                            metadata):
         """Updates or creates instance metadata."""
+        combined_metadata = self.get_instance_metadata(context, instance_id)
+        combined_metadata.update(metadata)
+        self._check_metadata_properties_quota(context, combined_metadata)
         self.db.instance_metadata_update_or_create(context, instance_id,
                                                    metadata)
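The api.py change removes the duplicated quota check from `create()` and centralizes it in the new `_check_metadata_properties_quota()` helper, which `update_or_create_instance_metadata()` now also calls on the combined (existing plus incoming) metadata, so incremental updates can no longer slip past the quota. Note that `metadata` changes shape from a list of `{'key': ..., 'value': ...}` dicts to a plain dict. Below is a minimal standalone sketch of the same validation logic; `MAX_METADATA_ITEMS` and `MetadataQuotaError` are illustrative stand-ins for `quota.allowed_metadata_items()` and `quota.QuotaError`, which live outside this diff:

```python
MAX_METADATA_ITEMS = 128    # assumed per-project quota; Nova looks this up via quota.allowed_metadata_items()
MAX_PROPERTY_LENGTH = 255   # hard-coded because metadata lives in 255-char DB columns


class MetadataQuotaError(Exception):
    """Stand-in for quota.QuotaError."""


def check_metadata_properties_quota(metadata):
    """Reject metadata dicts that exceed the count or size limits."""
    if len(metadata) > MAX_METADATA_ITEMS:
        raise MetadataQuotaError(
            "tried to set %d metadata properties, limit is %d"
            % (len(metadata), MAX_METADATA_ITEMS))
    for key, value in metadata.items():
        if len(key) > MAX_PROPERTY_LENGTH or len(value) > MAX_PROPERTY_LENGTH:
            raise MetadataQuotaError(
                "metadata property key or value too long: %r" % key)


check_metadata_properties_quota({'role': 'webserver'})   # within limits, passes
try:
    check_metadata_properties_quota({'k' * 300: 'v'})    # 300-char key, rejected
except MetadataQuotaError as exc:
    print("rejected: %s" % exc)
```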
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 6accf82bb..4e250bb83 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -63,7 +63,7 @@ def create(name, memory, vcpus, local_gb, flavorid, swap=0,
 
 def destroy(name):
     """Marks instance types as deleted."""
-    if name == None:
+    if name is None:
         raise exception.InvalidInputException(_("No instance type specified"))
     else:
         try:
@@ -75,7 +75,7 @@ def destroy(name):
 
 def purge(name):
     """Removes instance types from database."""
-    if name == None:
+    if name is None:
         raise exception.InvalidInputException(_("No instance type specified"))
     else:
         try:
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 68b163355..c795d72ad 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -434,7 +434,6 @@ class ComputeManager(manager.SchedulerDependentManager):
         """Destroys the source instance"""
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
-        migration_ref = self.db.migration_get(context, migration_id)
         self.driver.destroy(instance_ref)
 
     @exception.wrap_exception
@@ -525,8 +524,9 @@ class ComputeManager(manager.SchedulerDependentManager):
         self.db.migration_update(context, migration_id,
                 {'status': 'post-migrating', })
 
-        service = self.db.service_get_by_host_and_topic(context,
-                migration_ref['dest_compute'], FLAGS.compute_topic)
+        # Make sure the service exists before sending a message.
+        _service = self.db.service_get_by_host_and_topic(context,
+                migration_ref['dest_compute'], FLAGS.compute_topic)
         topic = self.db.queue_get_for(context, FLAGS.compute_topic,
                 migration_ref['dest_compute'])
         rpc.cast(context, topic,
@@ -652,7 +652,6 @@ class ComputeManager(manager.SchedulerDependentManager):
 
         """
         context = context.elevated()
-        instance_ref = self.db.instance_get(context, instance_id)
 
         LOG.debug(_('instance %s: locking'), instance_id, context=context)
         self.db.instance_update(context, instance_id, {'locked': True})
@@ -664,7 +663,6 @@ class ComputeManager(manager.SchedulerDependentManager):
 
         """
         context = context.elevated()
-        instance_ref = self.db.instance_get(context, instance_id)
 
         LOG.debug(_('instance %s: unlocking'), instance_id, context=context)
         self.db.instance_update(context, instance_id, {'locked': False})
@@ -1111,6 +1109,9 @@ class ComputeManager(manager.SchedulerDependentManager):
         # Are there VMs not in the DB?
         for vm_not_found_in_db in vms_not_found_in_db:
             name = vm_not_found_in_db
-            # TODO(justinsb): What to do here? Adopt it? Shut it down?
-            LOG.warning(_("Found VM not in DB: '%(name)s'. Ignoring")
-                        % locals())
+
+            # We only care about instances that compute *should* know about
+            if name.startswith("instance-"):
+                # TODO(justinsb): What to do here? Adopt it? Shut it down?
+                LOG.warning(_("Found VM not in DB: '%(name)s'. Ignoring")
+                            % locals())
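The instance_types.py hunks (like the monitor.py ones below) are PEP 8 cleanups: `x == None` can be defeated by a class that overrides `__eq__`, whereas `is None` tests object identity and cannot. The more substantive manager.py change narrows the "VM not in DB" warning to guests that follow Nova's `instance-` naming convention, so unrelated VMs on a shared hypervisor stop generating noise. A rough sketch of that reconciliation filter, with `hypervisor_vm_names` and `db_instance_names` as hypothetical inputs:

```python
def find_orphan_vms(hypervisor_vm_names, db_instance_names):
    """Return hypervisor guests that look Nova-managed but have no DB record."""
    known = set(db_instance_names)
    orphans = []
    for name in hypervisor_vm_names:
        if name in known:
            continue
        # Only guests that Nova itself would have named ("instance-<id>")
        # are compute's concern; anything else on the host is ignored.
        if name.startswith("instance-"):
            orphans.append(name)
    return orphans


print(find_orphan_vms(
    ["instance-00000001", "instance-00000002", "someone-elses-appliance"],
    ["instance-00000001"]))
# -> ['instance-00000002']
```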
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
index 04e08a235..3bb54a382 100644
--- a/nova/compute/monitor.py
+++ b/nova/compute/monitor.py
@@ -260,7 +260,7 @@ class Instance(object):
 
         try:
             data = self.fetch_cpu_stats()
-            if data != None:
+            if data is not None:
                 LOG.debug('CPU: %s', data)
                 update_rrd(self, 'cpu', data)
 
@@ -313,7 +313,7 @@ class Instance(object):
         LOG.debug('CPU: %d', self.cputime)
 
         # Skip calculation on first pass. Need delta to get a meaningful value.
-        if cputime_last_updated == None:
+        if cputime_last_updated is None:
             return None
 
         # Calculate the number of seconds between samples.
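monitor.py returns `None` on the first sample because a utilization figure needs a delta between two cumulative cputime readings. The calculation itself falls outside this hunk; the sketch below is an assumed reconstruction of that delta math, with all names hypothetical:

```python
import time


def cpu_utilization(cputime, cputime_last, cputime_last_updated, now=None):
    """Percent CPU consumed between two cumulative cputime samples (nanoseconds).

    Mirrors the guard in monitor.py: with no previous sample there is no
    delta, so the first call returns None.
    """
    if cputime_last_updated is None:
        return None
    now = time.time() if now is None else now
    elapsed = now - cputime_last_updated
    if elapsed <= 0:
        return None
    # cputime counts total nanoseconds of CPU used, so utilization is the
    # cputime delta divided by the wall-clock delta.
    return 100.0 * (cputime - cputime_last) / (elapsed * 1e9)


t0 = time.time()
# 2 seconds of CPU consumed over a 10-second window -> 20.0
print(cpu_utilization(5 * 10**9, 3 * 10**9, t0, now=t0 + 10))
```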
