summaryrefslogtreecommitdiffstats
path: root/nova/compute
diff options
context:
space:
mode:
Diffstat (limited to 'nova/compute')
-rw-r--r--nova/compute/api.py192
1 files changed, 136 insertions, 56 deletions
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 4f327fab1..b0949a729 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -128,18 +128,16 @@ class API(base.Base):
LOG.warn(msg)
raise quota.QuotaError(msg, "MetadataLimitExceeded")
- def create(self, context, instance_type,
+ def _check_create_parameters(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
min_count=1, max_count=1,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
- injected_files=None,
- admin_password=None):
- """Create the number and type of instances requested.
+ injected_files=None, admin_password=None, zone_blob=None):
+ """Verify all the input parameters regardless of the provisioning
+ strategy being performed."""
- Verifies that quota and other arguments are valid.
- """
if not instance_type:
instance_type = instance_types.get_default_instance_type()
@@ -225,63 +223,145 @@ class API(base.Base):
'metadata': metadata,
'availability_zone': availability_zone,
'os_type': os_type}
- elevated = context.elevated()
- instances = []
- LOG.debug(_("Going to run %s instances..."), num_instances)
- for num in range(num_instances):
- instance = dict(mac_address=utils.generate_mac(),
- launch_index=num,
- **base_options)
- instance = self.db.instance_create(context, instance)
- instance_id = instance['id']
- elevated = context.elevated()
- if not security_groups:
- security_groups = []
- for security_group_id in security_groups:
- self.db.instance_add_security_group(elevated,
- instance_id,
- security_group_id)
-
- # Set sane defaults if not specified
- updates = dict(hostname=self.hostname_factory(instance_id))
- if (not hasattr(instance, 'display_name') or
- instance.display_name is None):
- updates['display_name'] = "Server %s" % instance_id
-
- instance = self.update(context, instance_id, **updates)
- instances.append(instance)
+ return (num_instances, base_options, security_groups)
+
+ def create_db_entry_for_new_instance(self, context, base_options,
+ security_groups, num=1):
+ """Create an entry in the DB for this new instance,
+ including any related table updates (such as security
+            groups, MAC address, etc). This will be called by create()
+ in the majority of situations, but all-at-once style
+ Schedulers may initiate the call."""
+ instance = dict(mac_address=utils.generate_mac(),
+ launch_index=num,
+ **base_options)
+ instance = self.db.instance_create(context, instance)
+ instance_id = instance['id']
- pid = context.project_id
- uid = context.user_id
- LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
- " instance %(instance_id)s") % locals())
+ elevated = context.elevated()
+ if not security_groups:
+ security_groups = []
+ for security_group_id in security_groups:
+ self.db.instance_add_security_group(elevated,
+ instance_id,
+ security_group_id)
- # NOTE(sandy): For now we're just going to pass in the
- # instance_type record to the scheduler. In a later phase
- # we'll be ripping this whole for-loop out and deferring the
- # creation of the Instance record. At that point all this will
- # change.
- rpc.cast(context,
- FLAGS.scheduler_topic,
- {"method": "run_instance",
- "args": {"topic": FLAGS.compute_topic,
- "instance_id": instance_id,
- "request_spec": {
- 'instance_type': instance_type,
- 'filter':
- 'nova.scheduler.host_filter.'
- 'InstanceTypeFilter',
- },
- "availability_zone": availability_zone,
- "injected_files": injected_files,
- "admin_password": admin_password,
- },
- })
+ # Set sane defaults if not specified
+ updates = dict(hostname=self.hostname_factory(instance_id))
+ if (not hasattr(instance, 'display_name') or
+ instance.display_name is None):
+ updates['display_name'] = "Server %s" % instance_id
+
+ instance = self.update(context, instance_id, **updates)
for group_id in security_groups:
self.trigger_security_group_members_refresh(elevated, group_id)
+ return instance
+
+ def _ask_scheduler_to_create_instance(self, context, base_options,
+ instance_type, zone_blob,
+ availability_zone, injected_files,
+ admin_password,
+ instance_id=None, num_instances=1):
+ """Send the run_instance request to the schedulers for processing."""
+ pid = context.project_id
+ uid = context.user_id
+ if instance_id:
+ LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
+ " instance %(instance_id)s (single-shot)") % locals())
+ else:
+ LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
+ " (all-at-once)") % locals())
+
+ filter_class = 'nova.scheduler.host_filter.InstanceTypeFilter'
+ request_spec = {
+ 'instance_properties': base_options,
+ 'instance_type': instance_type,
+ 'filter': filter_class,
+ 'blob': zone_blob,
+ 'num_instances': num_instances
+ }
+
+ rpc.cast(context,
+ FLAGS.scheduler_topic,
+ {"method": "run_instance",
+ "args": {"topic": FLAGS.compute_topic,
+ "instance_id": instance_id,
+ "request_spec": request_spec,
+ "availability_zone": availability_zone,
+ "admin_password": admin_password,
+ "injected_files": injected_files}})
+
+ def create_all_at_once(self, context, instance_type,
+ image_href, kernel_id=None, ramdisk_id=None,
+ min_count=1, max_count=1,
+ display_name='', display_description='',
+ key_name=None, key_data=None, security_group='default',
+ availability_zone=None, user_data=None, metadata={},
+ injected_files=None, admin_password=None, zone_blob=None):
+ """Provision the instances by passing the whole request to
+ the Scheduler for execution. Returns a Reservation ID
+ related to the creation of all of these instances."""
+ num_instances, base_options, security_groups = \
+ self._check_create_parameters(
+ context, instance_type,
+ image_href, kernel_id, ramdisk_id,
+ min_count, max_count,
+ display_name, display_description,
+ key_name, key_data, security_group,
+ availability_zone, user_data, metadata,
+ injected_files, admin_password, zone_blob)
+
+ self._ask_scheduler_to_create_instance(context, base_options,
+ instance_type, zone_blob,
+ availability_zone, injected_files,
+ admin_password,
+ num_instances=num_instances)
+
+ return base_options['reservation_id']
+
+ def create(self, context, instance_type,
+ image_href, kernel_id=None, ramdisk_id=None,
+ min_count=1, max_count=1,
+ display_name='', display_description='',
+ key_name=None, key_data=None, security_group='default',
+ availability_zone=None, user_data=None, metadata={},
+ injected_files=None, admin_password=None, zone_blob=None):
+ """
+ Provision the instances by sending off a series of single
+        instance requests to the Schedulers. This is fine for trivial
+ Scheduler drivers, but may remove the effectiveness of the
+ more complicated drivers.
+
+ Returns a list of instance dicts.
+ """
+
+ num_instances, base_options, security_groups = \
+ self._check_create_parameters(
+ context, instance_type,
+ image_href, kernel_id, ramdisk_id,
+ min_count, max_count,
+ display_name, display_description,
+ key_name, key_data, security_group,
+ availability_zone, user_data, metadata,
+ injected_files, admin_password, zone_blob)
+
+ instances = []
+ LOG.debug(_("Going to run %s instances..."), num_instances)
+ for num in range(num_instances):
+ instance = self.create_db_entry_for_new_instance(context,
+ base_options, security_groups, num=num)
+ instances.append(instance)
+ instance_id = instance['id']
+
+ self._ask_scheduler_to_create_instance(context, base_options,
+ instance_type, zone_blob,
+ availability_zone, injected_files,
+ admin_password,
+ instance_id=instance_id)
+
return [dict(x.iteritems()) for x in instances]
def has_finished_migration(self, context, instance_id):