author     Jenkins <jenkins@review.openstack.org>      2013-06-04 21:13:41 +0000
committer  Gerrit Code Review <review@openstack.org>   2013-06-04 21:13:41 +0000
commit     7f050f7ef61be16bdcd2aafc1e3518398385eec5 (patch)
tree       34ef112cd2498118a9f4b8d8caf0ed8b3a6ba0b7
parent     aae7b7a7ec3701e2cc7a9a1fa726ba0e14520f74 (diff)
parent     6b16c8731c44e4a6c80b803f3e8afdd88386d577 (diff)
Merge "Call scheduler for run_instance from conductor"
-rw-r--r--  nova/cells/manager.py                        10
-rw-r--r--  nova/cells/messaging.py                      13
-rw-r--r--  nova/cells/rpcapi.py                         14
-rw-r--r--  nova/cells/scheduler.py                     153
-rw-r--r--  nova/compute/api.py                         250
-rw-r--r--  nova/compute/cells_api.py                    16
-rw-r--r--  nova/conductor/api.py                        22
-rw-r--r--  nova/conductor/manager.py                    15
-rw-r--r--  nova/conductor/rpcapi.py                     14
-rw-r--r--  nova/scheduler/utils.py                      33
-rw-r--r--  nova/tests/api/ec2/test_cinder_cloud.py       2
-rw-r--r--  nova/tests/api/ec2/test_cloud.py              2
-rw-r--r--  nova/tests/cells/test_cells_manager.py        9
-rw-r--r--  nova/tests/cells/test_cells_messaging.py     10
-rw-r--r--  nova/tests/cells/test_cells_rpcapi.py        15
-rw-r--r--  nova/tests/cells/test_cells_scheduler.py    260
-rw-r--r--  nova/tests/conductor/test_conductor.py       35
-rw-r--r--  nova/tests/fake_utils.py                     28
-rw-r--r--  nova/tests/integrated/test_api_samples.py     2
-rw-r--r--  nova/utils.py                                10
20 files changed, 708 insertions(+), 205 deletions(-)
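
Taken together, the patch reroutes instance builds: nova-api no longer casts run_instance to the scheduler itself, it hands the work to the conductor's new ComputeTaskAPI.build_instances, and the conductor (for now) translates that back into the existing scheduler run_instance call; the cells code grows a parallel build_instances path. Below is an illustrative, self-contained sketch of that call chain with stand-in classes; the names mirror the patch but none of this is the real nova code.

    # Illustrative sketch of the reworked build path. The class and method
    # names mirror the patch, but the bodies are simplified stand-ins rather
    # than the real nova RPC proxies.

    class FakeSchedulerRPCAPI(object):
        def run_instance(self, context, **kwargs):
            print('scheduler run_instance, uuids=%s'
                  % kwargs['request_spec']['instance_uuids'])


    class FakeComputeTaskManager(object):
        """Conductor side: translates build_instances into run_instance."""

        def __init__(self):
            self.scheduler_rpcapi = FakeSchedulerRPCAPI()

        def build_instances(self, context, instances, image,
                            filter_properties, **kwargs):
            request_spec = {'image': image,
                            'instance_properties': instances[0],
                            'instance_uuids': [i['uuid'] for i in instances]}
            self.scheduler_rpcapi.run_instance(
                context, request_spec=request_spec,
                filter_properties=filter_properties, is_first_time=True)


    # nova-api side: compute.api.API._create_instance now ends with a call
    # shaped like this instead of casting to the scheduler directly.
    conductor = FakeComputeTaskManager()
    conductor.build_instances(context=None,
                              instances=[{'uuid': 'fake-uuid-1'}],
                              image={'id': 'fake-image'},
                              filter_properties={})
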
diff --git a/nova/cells/manager.py b/nova/cells/manager.py
index 15ee2224c..ba909c034 100644
--- a/nova/cells/manager.py
+++ b/nova/cells/manager.py
@@ -64,7 +64,7 @@ class CellsManager(manager.Manager):
Scheduling requests get passed to the scheduler class.
"""
- RPC_API_VERSION = '1.7'
+ RPC_API_VERSION = '1.8'
def __init__(self, *args, **kwargs):
# Mostly for tests.
@@ -186,6 +186,14 @@ class CellsManager(manager.Manager):
self.msg_runner.schedule_run_instance(ctxt, our_cell,
host_sched_kwargs)
+ def build_instances(self, ctxt, build_inst_kwargs):
+ """Pick a cell (possibly ourselves) to build new instance(s) and
+ forward the request accordingly.
+ """
+ # Target is ourselves first.
+ our_cell = self.state_manager.get_my_state()
+ self.msg_runner.build_instances(ctxt, our_cell, build_inst_kwargs)
+
def get_cell_info_for_neighbors(self, _ctxt):
"""Return cell information for our neighbor cells."""
return self.state_manager.get_cell_info_for_neighbors()
diff --git a/nova/cells/messaging.py b/nova/cells/messaging.py
index 1ea68d29f..319067836 100644
--- a/nova/cells/messaging.py
+++ b/nova/cells/messaging.py
@@ -651,6 +651,10 @@ class _TargetedMessageMethods(_BaseMessageMethods):
"""Parent cell told us to schedule new instance creation."""
self.msg_runner.scheduler.run_instance(message, host_sched_kwargs)
+ def build_instances(self, message, build_inst_kwargs):
+ """Parent cell told us to schedule new instance creation."""
+ self.msg_runner.scheduler.build_instances(message, build_inst_kwargs)
+
def run_compute_api_method(self, message, method_info):
"""Run a method in the compute api class."""
method = method_info['method']
@@ -1132,6 +1136,15 @@ class MessageRunner(object):
method_kwargs, 'down', target_cell)
message.process()
+ def build_instances(self, ctxt, target_cell, build_inst_kwargs):
+ """Called by the cell scheduler to tell a child cell to build
+ instance(s).
+ """
+ method_kwargs = dict(build_inst_kwargs=build_inst_kwargs)
+ message = _TargetedMessage(self, ctxt, 'build_instances',
+ method_kwargs, 'down', target_cell)
+ message.process()
+
def run_compute_api_method(self, ctxt, cell_name, method_info, call):
"""Call a compute API method in a specific cell."""
message = _TargetedMessage(self, ctxt, 'run_compute_api_method',
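
MessageRunner.build_instances reuses the targeted-message pattern already used by schedule_run_instance: the method name and kwargs are wrapped in a _TargetedMessage routed 'down' to the target cell, and the receiving cell's _TargetedMessageMethods.build_instances hands the kwargs to its local scheduler. A toy, single-process dispatcher showing that shape (hypothetical classes, no RPC involved):

    class TargetedMessage(object):
        """Simplified stand-in for _TargetedMessage."""

        def __init__(self, methods, method_name, method_kwargs):
            self.methods = methods
            self.method_name = method_name
            self.method_kwargs = method_kwargs

        def process(self):
            # In nova this happens in the target cell after routing; here we
            # just dispatch locally by method name.
            handler = getattr(self.methods, self.method_name)
            return handler(self, **self.method_kwargs)


    class TargetedMessageMethods(object):
        def build_instances(self, message, build_inst_kwargs):
            print('child cell scheduling build: %s' % build_inst_kwargs)


    methods = TargetedMessageMethods()
    msg = TargetedMessage(methods, 'build_instances',
                          {'build_inst_kwargs': {'instances': [{'uuid': 'u1'}]}})
    msg.process()
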
diff --git a/nova/cells/rpcapi.py b/nova/cells/rpcapi.py
index c8b7c680f..4a45af255 100644
--- a/nova/cells/rpcapi.py
+++ b/nova/cells/rpcapi.py
@@ -50,6 +50,7 @@ class CellsAPI(rpc_proxy.RpcProxy):
action_events_get()
1.6 - Adds consoleauth_delete_tokens() and validate_console_port()
1.7 - Adds service_update()
+ 1.8 - Adds build_instances(), deprecates schedule_run_instance()
'''
BASE_RPC_API_VERSION = '1.0'
@@ -79,11 +80,24 @@ class CellsAPI(rpc_proxy.RpcProxy):
method_info=method_info,
call=True))
+ # NOTE(alaski): Deprecated and should be removed later.
def schedule_run_instance(self, ctxt, **kwargs):
"""Schedule a new instance for creation."""
self.cast(ctxt, self.make_msg('schedule_run_instance',
host_sched_kwargs=kwargs))
+ def build_instances(self, ctxt, **kwargs):
+ """Build instances."""
+ build_inst_kwargs = kwargs
+ instances = build_inst_kwargs['instances']
+ instances_p = [jsonutils.to_primitive(inst) for inst in instances]
+ build_inst_kwargs['instances'] = instances_p
+ build_inst_kwargs['image'] = jsonutils.to_primitive(
+ build_inst_kwargs['image'])
+ self.cast(ctxt, self.make_msg('build_instances',
+ build_inst_kwargs=build_inst_kwargs),
+ version=1.8)
+
def instance_update_at_top(self, ctxt, instance):
"""Update instance at API level."""
if not CONF.cells.enable:
diff --git a/nova/cells/scheduler.py b/nova/cells/scheduler.py
index c95498fa0..c54b9b578 100644
--- a/nova/cells/scheduler.py
+++ b/nova/cells/scheduler.py
@@ -27,10 +27,12 @@ from nova import compute
from nova.compute import instance_actions
from nova.compute import utils as compute_utils
from nova.compute import vm_states
+from nova import conductor
from nova.db import base
from nova import exception
from nova.openstack.common import log as logging
from nova.scheduler import rpcapi as scheduler_rpcapi
+from nova.scheduler import utils as scheduler_utils
cell_scheduler_opts = [
cfg.ListOpt('scheduler_filter_classes',
@@ -67,6 +69,7 @@ class CellsScheduler(base.Base):
self.state_manager = msg_runner.state_manager
self.compute_api = compute.API()
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
+ self.compute_task_api = conductor.ComputeTaskAPI()
self.filter_handler = filters.CellFilterHandler()
self.filter_classes = self.filter_handler.get_matching_classes(
CONF.cells.scheduler_filter_classes)
@@ -74,18 +77,19 @@ class CellsScheduler(base.Base):
self.weigher_classes = self.weight_handler.get_matching_classes(
CONF.cells.scheduler_weight_classes)
- def _create_instances_here(self, ctxt, request_spec):
- instance_values = request_spec['instance_properties']
- num_instances = len(request_spec['instance_uuids'])
- for i, instance_uuid in enumerate(request_spec['instance_uuids']):
+ def _create_instances_here(self, ctxt, instance_uuids, instance_properties,
+ instance_type, image, security_groups, block_device_mapping):
+ instance_values = copy.copy(instance_properties)
+ num_instances = len(instance_uuids)
+ for i, instance_uuid in enumerate(instance_uuids):
instance_values['uuid'] = instance_uuid
instance = self.compute_api.create_db_entry_for_new_instance(
ctxt,
- request_spec['instance_type'],
- request_spec['image'],
+ instance_type,
+ image,
instance_values,
- request_spec['security_group'],
- request_spec['block_device_mapping'],
+ security_groups,
+ block_device_mapping,
num_instances, i)
self.msg_runner.instance_update_at_top(ctxt, instance)
@@ -104,24 +108,7 @@ class CellsScheduler(base.Base):
cells.append(our_cell)
return cells
- def _run_instance(self, message, host_sched_kwargs):
- """Attempt to schedule instance(s). If we have no cells
- to try, raise exception.NoCellsAvailable
- """
- ctxt = message.ctxt
- routing_path = message.routing_path
- request_spec = host_sched_kwargs['request_spec']
-
- LOG.debug(_("Scheduling with routing_path=%(routing_path)s"),
- {'routing_path': routing_path})
-
- filter_properties = copy.copy(host_sched_kwargs['filter_properties'])
- filter_properties.update({'context': ctxt,
- 'scheduler': self,
- 'routing_path': routing_path,
- 'host_sched_kwargs': host_sched_kwargs,
- 'request_spec': request_spec})
-
+ def _grab_target_cells(self, filter_properties):
cells = self._get_possible_cells()
cells = self.filter_handler.get_filtered_objects(self.filter_classes,
cells,
@@ -140,42 +127,124 @@ class CellsScheduler(base.Base):
self.weigher_classes, cells, filter_properties)
LOG.debug(_("Weighted cells: %(weighted_cells)s"),
{'weighted_cells': weighted_cells})
+ target_cells = [cell.obj for cell in weighted_cells]
+ return target_cells
- # Keep trying until one works
- for weighted_cell in weighted_cells:
- cell = weighted_cell.obj
+ def _run_instance(self, message, target_cells, instance_uuids,
+ host_sched_kwargs):
+ """Attempt to schedule instance(s)."""
+ ctxt = message.ctxt
+ request_spec = host_sched_kwargs['request_spec']
+ instance_properties = request_spec['instance_properties']
+ instance_type = request_spec['instance_type']
+ image = request_spec['image']
+ security_groups = request_spec['security_group']
+ block_device_mapping = request_spec['block_device_mapping']
+
+ LOG.debug(_("Scheduling with routing_path=%(routing_path)s"),
+ {'routing_path': message.routing_path})
+
+ for target_cell in target_cells:
try:
- if cell.is_me:
- # Need to create instance DB entry as scheduler
- # thinks it's already created... At least how things
- # currently work.
- self._create_instances_here(ctxt, request_spec)
+ if target_cell.is_me:
+ # Need to create instance DB entries as the host scheduler
+ # expects that the instance(s) already exists.
+ self._create_instances_here(ctxt, instance_uuids,
+ instance_properties, instance_type, image,
+ security_groups, block_device_mapping)
# Need to record the create action in the db as the
# scheduler expects it to already exist.
- self._create_action_here(
- ctxt, request_spec['instance_uuids'])
+ self._create_action_here(ctxt, instance_uuids)
self.scheduler_rpcapi.run_instance(ctxt,
**host_sched_kwargs)
return
- # Forward request to cell
- self.msg_runner.schedule_run_instance(ctxt, cell,
+ self.msg_runner.schedule_run_instance(ctxt, target_cell,
host_sched_kwargs)
return
except Exception:
LOG.exception(_("Couldn't communicate with cell '%s'") %
- cell.name)
+ target_cell.name)
# FIXME(comstud): Would be nice to kick this back up so that
# the parent cell could retry, if we had a parent.
msg = _("Couldn't communicate with any cells")
LOG.error(msg)
raise exception.NoCellsAvailable()
+ def _build_instances(self, message, target_cells, instance_uuids,
+ build_inst_kwargs):
+ """Attempt to build instance(s) or send msg to child cell."""
+ ctxt = message.ctxt
+ instance_properties = build_inst_kwargs['instances'][0]
+ instance_type = build_inst_kwargs['instance_type']
+ image = build_inst_kwargs['image']
+ security_groups = build_inst_kwargs['security_group']
+ block_device_mapping = build_inst_kwargs['block_device_mapping']
+
+ LOG.debug(_("Building instances with routing_path=%(routing_path)s"),
+ {'routing_path': message.routing_path})
+
+ for target_cell in target_cells:
+ try:
+ if target_cell.is_me:
+ # Need to create instance DB entries as the conductor
+ # expects that the instance(s) already exists.
+ self._create_instances_here(ctxt, instance_uuids,
+ instance_properties, instance_type, image,
+ security_groups, block_device_mapping)
+ # Need to record the create action in the db as the
+ # conductor expects it to already exist.
+ self._create_action_here(ctxt, instance_uuids)
+ self.compute_task_api.build_instances(ctxt,
+ **build_inst_kwargs)
+ return
+ self.msg_runner.build_instances(ctxt, target_cell,
+ build_inst_kwargs)
+ return
+ except Exception:
+ LOG.exception(_("Couldn't communicate with cell '%s'") %
+ target_cell.name)
+ # FIXME(comstud): Would be nice to kick this back up so that
+ # the parent cell could retry, if we had a parent.
+ msg = _("Couldn't communicate with any cells")
+ LOG.error(msg)
+ raise exception.NoCellsAvailable()
+
+ def build_instances(self, message, build_inst_kwargs):
+ image = build_inst_kwargs['image']
+ instance_uuids = [inst['uuid'] for inst in
+ build_inst_kwargs['instances']]
+ instances = build_inst_kwargs['instances']
+ request_spec = scheduler_utils.build_request_spec(image, instances)
+ filter_properties = copy.copy(build_inst_kwargs['filter_properties'])
+ filter_properties.update({'context': message.ctxt,
+ 'scheduler': self,
+ 'routing_path': message.routing_path,
+ 'host_sched_kwargs': build_inst_kwargs,
+ 'request_spec': request_spec})
+ self._schedule_build_to_cells(message, instance_uuids,
+ filter_properties, self._build_instances, build_inst_kwargs)
+
def run_instance(self, message, host_sched_kwargs):
- """Pick a cell where we should create a new instance."""
+ request_spec = host_sched_kwargs['request_spec']
+ instance_uuids = request_spec['instance_uuids']
+ filter_properties = copy.copy(host_sched_kwargs['filter_properties'])
+ filter_properties.update({'context': message.ctxt,
+ 'scheduler': self,
+ 'routing_path': message.routing_path,
+ 'host_sched_kwargs': host_sched_kwargs,
+ 'request_spec': request_spec})
+ self._schedule_build_to_cells(message, instance_uuids,
+ filter_properties, self._run_instance, host_sched_kwargs)
+
+ def _schedule_build_to_cells(self, message, instance_uuids,
+ filter_properties, method, method_kwargs):
+ """Pick a cell where we should create a new instance(s)."""
try:
for i in xrange(max(0, CONF.cells.scheduler_retries) + 1):
try:
- return self._run_instance(message, host_sched_kwargs)
+ target_cells = self._grab_target_cells(filter_properties)
+ return method(message, target_cells, instance_uuids,
+ method_kwargs)
except exception.NoCellsAvailable:
if i == max(0, CONF.cells.scheduler_retries):
raise
@@ -186,8 +255,6 @@ class CellsScheduler(base.Base):
time.sleep(sleep_time)
continue
except Exception:
- request_spec = host_sched_kwargs['request_spec']
- instance_uuids = request_spec['instance_uuids']
LOG.exception(_("Error scheduling instances %(instance_uuids)s"),
{'instance_uuids': instance_uuids})
ctxt = message.ctxt
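
The scheduler refactor above splits the old _run_instance into pieces so run_instance and build_instances can share them: _grab_target_cells filters and weighs candidate cells, _run_instance/_build_instances walk the weighed cells and fall back to the next one on failure, and _schedule_build_to_cells retries the whole thing up to CONF.cells.scheduler_retries times when no cell is available. A rough standalone sketch of that control flow (no filtering or weighing, fixed retry count, all names hypothetical):

    import time


    class NoCellsAvailable(Exception):
        pass


    def schedule_build_to_cells(method, method_kwargs, grab_target_cells,
                                retries=2, retry_delay=0.0):
        """Shared retry wrapper, analogous to _schedule_build_to_cells."""
        for attempt in range(retries + 1):
            try:
                target_cells = grab_target_cells()
                return method(target_cells, **method_kwargs)
            except NoCellsAvailable:
                if attempt == retries:
                    raise
                time.sleep(retry_delay)


    def build_in_first_reachable_cell(target_cells, **build_kwargs):
        """Analogous to _build_instances: try cells in weighed order."""
        for cell in target_cells:
            try:
                print('building in cell %s: %s' % (cell, build_kwargs))
                return
            except Exception:
                continue   # couldn't talk to this cell, try the next one
        raise NoCellsAvailable()


    schedule_build_to_cells(build_in_first_reachable_cell,
                            {'instances': [{'uuid': 'fake-uuid'}]},
                            grab_target_cells=lambda: ['cell-a', 'cell-b'])
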
diff --git a/nova/compute/api.py b/nova/compute/api.py
index a3ffb6fea..f676c9797 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -541,34 +541,20 @@ class API(base.Base):
self._check_injected_file_quota(context, files_to_inject)
self._check_requested_image(context, image_id, image, instance_type)
- def _validate_and_provision_instance(self, context, instance_type,
- image_href, kernel_id, ramdisk_id,
- min_count, max_count,
- display_name, display_description,
- key_name, key_data, security_groups,
+ def _validate_and_build_base_options(self, context, instance_type,
+ image, image_href, image_id,
+ kernel_id, ramdisk_id, min_count,
+ max_count, display_name,
+ display_description, key_name,
+ key_data, security_groups,
availability_zone, user_data,
metadata, injected_files,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping,
- auto_disk_config, reservation_id,
- scheduler_hints):
+ auto_disk_config, reservation_id):
"""Verify all the input parameters regardless of the provisioning
strategy being performed."""
-
- if not metadata:
- metadata = {}
- if not security_groups:
- security_groups = ['default']
-
- if not instance_type:
- instance_type = flavors.get_default_instance_type()
- if not min_count:
- min_count = 1
- if not max_count:
- max_count = min_count
-
- block_device_mapping = block_device_mapping or []
if min_count > 1 or max_count > 1:
if any(map(lambda bdm: 'volume_id' in bdm, block_device_mapping)):
msg = _('Cannot attach one or more volumes to multiple'
@@ -591,88 +577,87 @@ class API(base.Base):
except base64.binascii.Error:
raise exception.InstanceUserDataMalformed()
+ self._checks_for_create_and_rebuild(context, image_id, image,
+ instance_type, metadata, injected_files)
+
+ self._check_requested_secgroups(context, security_groups)
+ self._check_requested_networks(context, requested_networks)
+
+ kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
+ context, kernel_id, ramdisk_id, image)
+
+ config_drive_id, config_drive = self._check_config_drive(
+ context, config_drive)
+
+ if key_data is None and key_name:
+ key_pair = self.db.key_pair_get(context, context.user_id,
+ key_name)
+ key_data = key_pair['public_key']
+
+ root_device_name = block_device.properties_root_device_name(
+ image.get('properties', {}))
+
+ system_metadata = flavors.save_instance_type_info(
+ dict(), instance_type)
+
+ base_options = {
+ 'reservation_id': reservation_id,
+ 'image_ref': image_href,
+ 'kernel_id': kernel_id or '',
+ 'ramdisk_id': ramdisk_id or '',
+ 'power_state': power_state.NOSTATE,
+ 'vm_state': vm_states.BUILDING,
+ 'config_drive_id': config_drive_id or '',
+ 'config_drive': config_drive or '',
+ 'user_id': context.user_id,
+ 'project_id': context.project_id,
+ 'instance_type_id': instance_type['id'],
+ 'memory_mb': instance_type['memory_mb'],
+ 'vcpus': instance_type['vcpus'],
+ 'root_gb': instance_type['root_gb'],
+ 'ephemeral_gb': instance_type['ephemeral_gb'],
+ 'display_name': display_name,
+ 'display_description': display_description or '',
+ 'user_data': user_data,
+ 'key_name': key_name,
+ 'key_data': key_data,
+ 'locked': False,
+ 'metadata': metadata,
+ 'access_ip_v4': access_ip_v4,
+ 'access_ip_v6': access_ip_v6,
+ 'availability_zone': availability_zone,
+ 'root_device_name': root_device_name,
+ 'progress': 0,
+ 'system_metadata': system_metadata}
+
+ options_from_image = self._inherit_properties_from_image(
+ image, auto_disk_config)
+
+ base_options.update(options_from_image)
+
+ return base_options
+
+ def _build_filter_properties(self, context, scheduler_hints, forced_host,
+ forced_node, instance_type):
+ filter_properties = dict(scheduler_hints=scheduler_hints)
+ filter_properties['instance_type'] = instance_type
+ if forced_host:
+ check_policy(context, 'create:forced_host', {})
+ filter_properties['force_hosts'] = [forced_host]
+ if forced_node:
+ check_policy(context, 'create:forced_host', {})
+ filter_properties['force_nodes'] = [forced_node]
+ return filter_properties
+
+ def _provision_instances(self, context, instance_type, min_count,
+ max_count, base_options, image, security_groups,
+ block_device_mapping):
# Reserve quotas
num_instances, quota_reservations = self._check_num_instances_quota(
context, instance_type, min_count, max_count)
-
- # Try to create the instance
+ LOG.debug(_("Going to run %s instances...") % num_instances)
+ instances = []
try:
- instances = []
- instance_uuids = []
-
- image_id, image = self._get_image(context, image_href)
-
- self._checks_for_create_and_rebuild(context, image_id, image,
- instance_type, metadata, injected_files)
-
- self._check_requested_secgroups(context, security_groups)
- self._check_requested_networks(context, requested_networks)
-
- kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
- context, kernel_id, ramdisk_id, image)
-
- config_drive_id, config_drive = self._check_config_drive(
- context, config_drive)
-
- if key_data is None and key_name:
- key_pair = self.db.key_pair_get(context, context.user_id,
- key_name)
- key_data = key_pair['public_key']
-
- root_device_name = block_device.properties_root_device_name(
- image.get('properties', {}))
-
- availability_zone, forced_host, forced_node = \
- self._handle_availability_zone(availability_zone)
-
- system_metadata = flavors.save_instance_type_info(
- dict(), instance_type)
-
- base_options = {
- 'reservation_id': reservation_id,
- 'image_ref': image_href,
- 'kernel_id': kernel_id or '',
- 'ramdisk_id': ramdisk_id or '',
- 'power_state': power_state.NOSTATE,
- 'vm_state': vm_states.BUILDING,
- 'config_drive_id': config_drive_id or '',
- 'config_drive': config_drive or '',
- 'user_id': context.user_id,
- 'project_id': context.project_id,
- 'instance_type_id': instance_type['id'],
- 'memory_mb': instance_type['memory_mb'],
- 'vcpus': instance_type['vcpus'],
- 'root_gb': instance_type['root_gb'],
- 'ephemeral_gb': instance_type['ephemeral_gb'],
- 'display_name': display_name,
- 'display_description': display_description or '',
- 'user_data': user_data,
- 'key_name': key_name,
- 'key_data': key_data,
- 'locked': False,
- 'metadata': metadata,
- 'access_ip_v4': access_ip_v4,
- 'access_ip_v6': access_ip_v6,
- 'availability_zone': availability_zone,
- 'root_device_name': root_device_name,
- 'progress': 0,
- 'system_metadata': system_metadata}
-
- options_from_image = self._inherit_properties_from_image(
- image, auto_disk_config)
-
- base_options.update(options_from_image)
-
- LOG.debug(_("Going to run %s instances...") % num_instances)
-
- filter_properties = dict(scheduler_hints=scheduler_hints)
- if forced_host:
- check_policy(context, 'create:forced_host', {})
- filter_properties['force_hosts'] = [forced_host]
- if forced_node:
- check_policy(context, 'create:forced_host', {})
- filter_properties['force_nodes'] = [forced_node]
-
for i in xrange(num_instances):
options = base_options.copy()
instance = self.create_db_entry_for_new_instance(
@@ -681,7 +666,6 @@ class API(base.Base):
num_instances, i)
instances.append(instance)
- instance_uuids.append(instance['uuid'])
self._validate_bdm(context, instance)
# send a state update notification for the initial create to
# show it going from non-existent to BUILDING
@@ -693,30 +677,20 @@ class API(base.Base):
except Exception:
with excutils.save_and_reraise_exception():
try:
- for instance_uuid in instance_uuids:
- self.db.instance_destroy(context, instance_uuid)
+ for instance in instances:
+ self.db.instance_destroy(context, instance['uuid'])
finally:
QUOTAS.rollback(context, quota_reservations)
# Commit the reservations
QUOTAS.commit(context, quota_reservations)
-
- request_spec = {
- 'image': jsonutils.to_primitive(image),
- 'instance_properties': base_options,
- 'instance_type': instance_type,
- 'instance_uuids': instance_uuids,
- 'block_device_mapping': block_device_mapping,
- 'security_group': security_groups,
- }
-
- return (instances, request_spec, filter_properties)
+ return instances
def _create_instance(self, context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
- key_name, key_data, security_group,
+ key_name, key_data, security_groups,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
@@ -727,28 +701,48 @@ class API(base.Base):
strategy being performed and schedule the instance(s) for
creation."""
+ # Normalize and setup some parameters
if reservation_id is None:
reservation_id = utils.generate_uid('r')
+ security_groups = security_groups or ['default']
+ min_count = min_count or 1
+ max_count = max_count or min_count
+ block_device_mapping = block_device_mapping or []
+ if not instance_type:
+ instance_type = flavors.get_default_instance_type()
+ image_id, image = self._get_image(context, image_href)
+
+ handle_az = self._handle_availability_zone
+ availability_zone, forced_host, forced_node = handle_az(
+ availability_zone)
+
+ base_options = self._validate_and_build_base_options(context,
+ instance_type, image, image_href, image_id, kernel_id,
+ ramdisk_id, min_count, max_count, display_name,
+ display_description, key_name, key_data, security_groups,
+ availability_zone, user_data, metadata, injected_files,
+ access_ip_v4, access_ip_v6, requested_networks, config_drive,
+ block_device_mapping, auto_disk_config, reservation_id)
+
+ instances = self._provision_instances(context, instance_type,
+ min_count, max_count, base_options, image, security_groups,
+ block_device_mapping)
- (instances, request_spec, filter_properties) = \
- self._validate_and_provision_instance(context, instance_type,
- image_href, kernel_id, ramdisk_id, min_count,
- max_count, display_name, display_description,
- key_name, key_data, security_group, availability_zone,
- user_data, metadata, injected_files, access_ip_v4,
- access_ip_v6, requested_networks, config_drive,
- block_device_mapping, auto_disk_config,
- reservation_id, scheduler_hints)
+ filter_properties = self._build_filter_properties(context,
+ scheduler_hints, forced_host, forced_node, instance_type)
for instance in instances:
self._record_action_start(context, instance,
instance_actions.CREATE)
- self.scheduler_rpcapi.run_instance(context,
- request_spec=request_spec,
- admin_password=admin_password, injected_files=injected_files,
- requested_networks=requested_networks, is_first_time=True,
- filter_properties=filter_properties)
+ self.compute_task_api.build_instances(context,
+ instances=instances, image=image,
+ filter_properties=filter_properties,
+ admin_password=admin_password,
+ injected_files=injected_files,
+ requested_networks=requested_networks,
+ security_groups=security_groups,
+ block_device_mapping=block_device_mapping)
return (instances, reservation_id)
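
_provision_instances keeps the reserve/rollback/commit quota discipline of the old monolithic method: reserve quota before creating any instance rows, destroy whatever was created and roll the reservation back if the loop fails part-way, and commit only once every row exists. A hedged, self-contained sketch of that pattern; the quota object and the dict standing in for the DB are made up for illustration:

    class FakeQuotas(object):
        def reserve(self, count):
            print('reserved quota for %d instances' % count)
            return 'reservation-token'

        def rollback(self, token):
            print('rolled back %s' % token)

        def commit(self, token):
            print('committed %s' % token)


    def provision_instances(db_rows, uuids, quotas):
        token = quotas.reserve(len(uuids))
        created = []
        try:
            for uuid in uuids:
                db_rows[uuid] = {'uuid': uuid, 'vm_state': 'building'}
                created.append(uuid)
        except Exception:
            # Undo the partial work, release the reservation, then re-raise.
            for uuid in created:
                db_rows.pop(uuid, None)
            quotas.rollback(token)
            raise
        quotas.commit(token)
        return created


    provision_instances({}, ['uuid-1', 'uuid-2'], FakeQuotas())
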
diff --git a/nova/compute/cells_api.py b/nova/compute/cells_api.py
index 4beeaeb3d..5ac5dd475 100644
--- a/nova/compute/cells_api.py
+++ b/nova/compute/cells_api.py
@@ -57,8 +57,18 @@ class SchedulerRPCAPIRedirect(object):
return None
return _noop_rpc_wrapper
- def run_instance(self, context, **kwargs):
- self.cells_rpcapi.schedule_run_instance(context, **kwargs)
+
+class ConductorTaskRPCAPIRedirect(object):
+ def __init__(self, cells_rpcapi_obj):
+ self.cells_rpcapi = cells_rpcapi_obj
+
+ def __getattr__(self, key):
+ def _noop_rpc_wrapper(*args, **kwargs):
+ return None
+ return _noop_rpc_wrapper
+
+ def build_instances(self, context, **kwargs):
+ self.cells_rpcapi.build_instances(context, **kwargs)
class ComputeRPCProxyAPI(compute_rpcapi.ComputeAPI):
@@ -90,6 +100,8 @@ class ComputeCellsAPI(compute_api.API):
self.compute_rpcapi = ComputeRPCAPINoOp()
# Redirect scheduler run_instance to cells.
self.scheduler_rpcapi = SchedulerRPCAPIRedirect(self.cells_rpcapi)
+ # Redirect conductor build_instances to cells
+ self._compute_task_api = ConductorTaskRPCAPIRedirect(self.cells_rpcapi)
def _cell_read_only(self, cell_name):
"""Is the target cell in a read-only mode?"""
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index ec247d1ac..33415233e 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -349,6 +349,17 @@ class LocalComputeTaskAPI(object):
return self._manager.migrate_server(context, instance, scheduler_hint,
live, rebuild, flavor, block_migration, disk_over_commit)
+ def build_instances(self, context, instances, image,
+ filter_properties, admin_password, injected_files,
+ requested_networks, security_groups, block_device_mapping):
+ utils.spawn_n(self._manager.build_instances, context,
+ instances=instances, image=image,
+ filter_properties=filter_properties,
+ admin_password=admin_password, injected_files=injected_files,
+ requested_networks=requested_networks,
+ security_groups=security_groups,
+ block_device_mapping=block_device_mapping)
+
class API(object):
"""Conductor API that does updates via RPC to the ConductorManager."""
@@ -692,3 +703,14 @@ class ComputeTaskAPI(object):
return self.conductor_compute_rpcapi.migrate_server(context, instance,
scheduler_hint, live, rebuild, flavor, block_migration,
disk_over_commit)
+
+ def build_instances(self, context, instances, image, filter_properties,
+ admin_password, injected_files, requested_networks,
+ security_groups, block_device_mapping):
+ self.conductor_compute_rpcapi.build_instances(context,
+ instances=instances, image=image,
+ filter_properties=filter_properties,
+ admin_password=admin_password, injected_files=injected_files,
+ requested_networks=requested_networks,
+ security_groups=security_groups,
+ block_device_mapping=block_device_mapping)
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index c7f139e4f..a7da96a73 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -33,6 +33,7 @@ from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
+from nova.scheduler import utils as scheduler_utils
LOG = logging.getLogger(__name__)
@@ -514,7 +515,7 @@ class ComputeTaskManager(object):
"""
RPC_API_NAMESPACE = 'compute_task'
- RPC_API_VERSION = '1.1'
+ RPC_API_VERSION = '1.2'
def __init__(self):
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
@@ -535,3 +536,15 @@ class ComputeTaskManager(object):
destination = scheduler_hint.get("host")
self.scheduler_rpcapi.live_migration(context, block_migration,
disk_over_commit, instance, destination)
+
+ def build_instances(self, context, instances, image, filter_properties,
+ admin_password, injected_files, requested_networks,
+ security_groups, block_device_mapping):
+ request_spec = scheduler_utils.build_request_spec(image, instances)
+ # NOTE(alaski): For compatibility until a new scheduler method is used.
+ request_spec.update({'block_device_mapping': block_device_mapping,
+ 'security_group': security_groups})
+ self.scheduler_rpcapi.run_instance(context, request_spec=request_spec,
+ admin_password=admin_password, injected_files=injected_files,
+ requested_networks=requested_networks, is_first_time=True,
+ filter_properties=filter_properties)
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index c2a0e1b93..59a4c9ff8 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -462,6 +462,7 @@ class ComputeTaskAPI(nova.openstack.common.rpc.proxy.RpcProxy):
1.0 - Initial version (empty).
1.1 - Added unified migrate_server call.
+ 1.2 - Added build_instances
"""
BASE_RPC_API_VERSION = '1.0'
@@ -481,3 +482,16 @@ class ComputeTaskAPI(nova.openstack.common.rpc.proxy.RpcProxy):
flavor=flavor_p, block_migration=block_migration,
disk_over_commit=disk_over_commit)
return self.call(context, msg, version='1.1')
+
+ def build_instances(self, context, instances, image, filter_properties,
+ admin_password, injected_files, requested_networks,
+ security_groups, block_device_mapping):
+ instances_p = [jsonutils.to_primitive(inst) for inst in instances]
+ image_p = jsonutils.to_primitive(image)
+ msg = self.make_msg('build_instances', instances=instances_p,
+ image=image_p, filter_properties=filter_properties,
+ admin_password=admin_password, injected_files=injected_files,
+ requested_networks=requested_networks,
+ security_groups=security_groups,
+ block_device_mapping=block_device_mapping)
+ self.cast(context, msg, version='1.2')
diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py
new file mode 100644
index 000000000..315f9df2b
--- /dev/null
+++ b/nova/scheduler/utils.py
@@ -0,0 +1,33 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Utility methods for scheduling."""
+
+from nova.compute import flavors
+from nova.openstack.common import jsonutils
+
+
+def build_request_spec(image, instances):
+ """Build a request_spec for the scheduler.
+
+ The request_spec assumes that all instances to be scheduled are the same
+ type.
+ """
+ instance = jsonutils.to_primitive(instances[0])
+ request_spec = {
+ 'image': image,
+ 'instance_properties': instance,
+ 'instance_type': flavors.extract_instance_type(instance),
+ 'instance_uuids': [inst['uuid'] for inst in instances]}
+ return request_spec
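
build_request_spec takes instance_properties and instance_type from the first instance only, relying on the documented assumption that every instance in the batch is the same type. A quick illustration of the shape of its output, using a simplified stand-in for flavors.extract_instance_type (the real helper rebuilds the flavor from the instance's system_metadata):

    def fake_extract_instance_type(instance):
        # Stand-in only: the real helper reads 'instance_type_*' keys out of
        # instance['system_metadata'].
        return {'name': instance['system_metadata']['instance_type_name']}


    def build_request_spec(image, instances, extract=fake_extract_instance_type):
        instance = instances[0]
        return {
            'image': image,
            'instance_properties': instance,
            'instance_type': extract(instance),
            'instance_uuids': [inst['uuid'] for inst in instances]}


    instances = [
        {'uuid': 'uuid-1',
         'system_metadata': {'instance_type_name': 'm1.small'}},
        {'uuid': 'uuid-2',
         'system_metadata': {'instance_type_name': 'm1.small'}},
    ]
    spec = build_request_spec({'id': 'image-1'}, instances)
    print(spec['instance_uuids'])   # ['uuid-1', 'uuid-2']
    print(spec['instance_type'])    # {'name': 'm1.small'}
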
diff --git a/nova/tests/api/ec2/test_cinder_cloud.py b/nova/tests/api/ec2/test_cinder_cloud.py
index 5d02a28d7..a307eaa2f 100644
--- a/nova/tests/api/ec2/test_cinder_cloud.py
+++ b/nova/tests/api/ec2/test_cinder_cloud.py
@@ -34,6 +34,7 @@ from nova import exception
from nova.openstack.common import rpc
from nova import test
from nova.tests import fake_network
+from nova.tests import fake_utils
from nova.tests.image import fake
from nova.tests import matchers
from nova import volume
@@ -88,6 +89,7 @@ class CinderCloudTestCase(test.TestCase):
super(CinderCloudTestCase, self).setUp()
ec2utils.reset_cache()
vol_tmpdir = self.useFixture(fixtures.TempDir()).path
+ fake_utils.stub_out_utils_spawn_n(self.stubs)
self.flags(compute_driver='nova.virt.fake.FakeDriver',
volume_api_class='nova.tests.fake_volume.API')
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index e9ff10be0..22f9c2d81 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -50,6 +50,7 @@ from nova import test
from nova.tests.api.openstack.compute.contrib import (
test_quantum_security_groups as test_quantum)
from nova.tests import fake_network
+from nova.tests import fake_utils
from nova.tests.image import fake
from nova.tests import matchers
from nova import utils
@@ -112,6 +113,7 @@ class CloudTestCase(test.TestCase):
self.flags(compute_driver='nova.virt.fake.FakeDriver',
volume_api_class='nova.tests.fake_volume.API')
self.useFixture(fixtures.FakeLogger('boto'))
+ fake_utils.stub_out_utils_spawn_n(self.stubs)
def fake_show(meh, context, id):
return {'id': id,
diff --git a/nova/tests/cells/test_cells_manager.py b/nova/tests/cells/test_cells_manager.py
index 137d48ff6..543ff66e7 100644
--- a/nova/tests/cells/test_cells_manager.py
+++ b/nova/tests/cells/test_cells_manager.py
@@ -122,6 +122,15 @@ class CellsManagerClassTestCase(test.TestCase):
self.cells_manager.schedule_run_instance(self.ctxt,
host_sched_kwargs=host_sched_kwargs)
+ def test_build_instances(self):
+ build_inst_kwargs = {'instances': [1, 2]}
+ self.mox.StubOutWithMock(self.msg_runner, 'build_instances')
+ our_cell = self.msg_runner.state_manager.get_my_state()
+ self.msg_runner.build_instances(self.ctxt, our_cell, build_inst_kwargs)
+ self.mox.ReplayAll()
+ self.cells_manager.build_instances(self.ctxt,
+ build_inst_kwargs=build_inst_kwargs)
+
def test_run_compute_api_method(self):
# Args should just be silently passed through
cell_name = 'fake-cell-name'
diff --git a/nova/tests/cells/test_cells_messaging.py b/nova/tests/cells/test_cells_messaging.py
index 1de39de1f..9aae11201 100644
--- a/nova/tests/cells/test_cells_messaging.py
+++ b/nova/tests/cells/test_cells_messaging.py
@@ -608,6 +608,16 @@ class CellsTargetedMethodsTestCase(test.TestCase):
self.tgt_cell_name,
host_sched_kwargs)
+ def test_build_instances(self):
+ build_inst_kwargs = {'filter_properties': {},
+ 'key1': 'value1',
+ 'key2': 'value2'}
+ self.mox.StubOutWithMock(self.tgt_scheduler, 'build_instances')
+ self.tgt_scheduler.build_instances(self.ctxt, build_inst_kwargs)
+ self.mox.ReplayAll()
+ self.src_msg_runner.build_instances(self.ctxt, self.tgt_cell_name,
+ build_inst_kwargs)
+
def test_run_compute_api_method(self):
instance_uuid = 'fake_instance_uuid'
diff --git a/nova/tests/cells/test_cells_rpcapi.py b/nova/tests/cells/test_cells_rpcapi.py
index 76c9f05d3..172b54831 100644
--- a/nova/tests/cells/test_cells_rpcapi.py
+++ b/nova/tests/cells/test_cells_rpcapi.py
@@ -114,6 +114,21 @@ class CellsAPITestCase(test.TestCase):
self._check_result(call_info, 'schedule_run_instance',
expected_args)
+ def test_build_instances(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.build_instances(
+ self.fake_context, instances=['1', '2'],
+ image={'fake': 'image'}, arg1=1, arg2=2, arg3=3)
+
+ expected_args = {'build_inst_kwargs': {'instances': ['1', '2'],
+ 'image': {'fake': 'image'},
+ 'arg1': 1,
+ 'arg2': 2,
+ 'arg3': 3}}
+ self._check_result(call_info, 'build_instances',
+ expected_args, version=1.8)
+
def test_instance_update_at_top(self):
fake_info_cache = {'id': 1,
'instance': 'fake_instance',
diff --git a/nova/tests/cells/test_cells_scheduler.py b/nova/tests/cells/test_cells_scheduler.py
index c8f90619e..9cd637cdf 100644
--- a/nova/tests/cells/test_cells_scheduler.py
+++ b/nova/tests/cells/test_cells_scheduler.py
@@ -26,6 +26,7 @@ from nova import context
from nova import db
from nova import exception
from nova.openstack.common import uuidutils
+from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.cells import fakes
@@ -73,24 +74,32 @@ class CellsSchedulerTestCase(test.TestCase):
for x in xrange(3):
instance_uuids.append(uuidutils.generate_uuid())
self.instance_uuids = instance_uuids
- self.request_spec = {'instance_uuids': instance_uuids,
- 'other': 'stuff'}
+ self.instances = [{'uuid': uuid} for uuid in instance_uuids]
+ self.request_spec = {
+ 'instance_uuids': instance_uuids,
+ 'instance_properties': 'fake_properties',
+ 'instance_type': 'fake_type',
+ 'image': 'fake_image',
+ 'security_group': 'fake_sec_groups',
+ 'block_device_mapping': 'fake_bdm'}
+ self.build_inst_kwargs = {
+ 'instances': self.instances,
+ 'instance_type': 'fake_type',
+ 'image': 'fake_image',
+ 'filter_properties': {},
+ 'security_group': 'fake_sec_groups',
+ 'block_device_mapping': 'fake_bdm'}
def test_create_instances_here(self):
# Just grab the first instance type
inst_type = db.instance_type_get(self.ctxt, 1)
image = {'properties': {}}
+ instance_uuids = self.instance_uuids
instance_props = {'hostname': 'meow',
'display_name': 'moo',
'image_ref': 'fake_image_ref',
'user_id': self.ctxt.user_id,
'project_id': self.ctxt.project_id}
- request_spec = {'instance_type': inst_type,
- 'image': image,
- 'security_group': ['default'],
- 'block_device_mapping': [],
- 'instance_properties': instance_props,
- 'instance_uuids': self.instance_uuids}
call_info = {'uuids': []}
@@ -100,10 +109,11 @@ class CellsSchedulerTestCase(test.TestCase):
self.stubs.Set(self.msg_runner, 'instance_update_at_top',
_fake_instance_update_at_top)
- self.scheduler._create_instances_here(self.ctxt, request_spec)
- self.assertEqual(self.instance_uuids, call_info['uuids'])
+ self.scheduler._create_instances_here(self.ctxt, instance_uuids,
+ instance_props, inst_type, image, ['default'], [])
+ self.assertEqual(instance_uuids, call_info['uuids'])
- for instance_uuid in self.instance_uuids:
+ for instance_uuid in instance_uuids:
instance = db.instance_get_by_uuid(self.ctxt, instance_uuid)
self.assertEqual('meow', instance['hostname'])
self.assertEqual('moo-%s' % instance['uuid'],
@@ -146,6 +156,48 @@ class CellsSchedulerTestCase(test.TestCase):
child_cells = self.state_manager.get_child_cells()
self.assertIn(call_info['target_cell'], child_cells)
+ def test_build_instances_selects_child_cell(self):
+ # Make sure there's no capacity info so we're sure to
+ # select a child cell
+ our_cell_info = self.state_manager.get_my_state()
+ our_cell_info.capacities = {}
+
+ call_info = {'times': 0}
+
+ orig_fn = self.msg_runner.build_instances
+
+ def msg_runner_build_instances(ctxt, target_cell, build_inst_kwargs):
+ # This gets called twice. Once for our running it
+ # in this cell.. and then it'll get called when the
+ # child cell is picked. So, first time.. just run it
+ # like normal.
+ if not call_info['times']:
+ call_info['times'] += 1
+ return orig_fn(ctxt, target_cell, build_inst_kwargs)
+ call_info['ctxt'] = ctxt
+ call_info['target_cell'] = target_cell
+ call_info['build_inst_kwargs'] = build_inst_kwargs
+
+ def fake_build_request_spec(image, instances):
+ request_spec = {
+ 'instance_uuids': [inst['uuid'] for inst in instances],
+ 'image': image}
+ return request_spec
+
+ self.stubs.Set(self.msg_runner, 'build_instances',
+ msg_runner_build_instances)
+ self.stubs.Set(scheduler_utils, 'build_request_spec',
+ fake_build_request_spec)
+
+ self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
+ self.build_inst_kwargs)
+
+ self.assertEqual(self.ctxt, call_info['ctxt'])
+ self.assertEqual(self.build_inst_kwargs,
+ call_info['build_inst_kwargs'])
+ child_cells = self.state_manager.get_child_cells()
+ self.assertIn(call_info['target_cell'], child_cells)
+
def test_run_instance_selects_current_cell(self):
# Make sure there's no child cells so that we will be
# selected
@@ -153,9 +205,16 @@ class CellsSchedulerTestCase(test.TestCase):
call_info = {}
- def fake_create_instances_here(ctxt, request_spec):
+ def fake_create_instances_here(ctxt, instance_uuids,
+ instance_properties, instance_type, image, security_groups,
+ block_device_mapping):
call_info['ctxt'] = ctxt
- call_info['request_spec'] = request_spec
+ call_info['instance_uuids'] = instance_uuids
+ call_info['instance_properties'] = instance_properties
+ call_info['instance_type'] = instance_type
+ call_info['image'] = image
+ call_info['security_groups'] = security_groups
+ call_info['block_device_mapping'] = block_device_mapping
def fake_rpc_run_instance(ctxt, **host_sched_kwargs):
call_info['host_sched_kwargs'] = host_sched_kwargs
@@ -172,8 +231,69 @@ class CellsSchedulerTestCase(test.TestCase):
self.my_cell_state, host_sched_kwargs)
self.assertEqual(self.ctxt, call_info['ctxt'])
- self.assertEqual(self.request_spec, call_info['request_spec'])
self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs'])
+ self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
+ self.assertEqual(self.request_spec['instance_properties'],
+ call_info['instance_properties'])
+ self.assertEqual(self.request_spec['instance_type'],
+ call_info['instance_type'])
+ self.assertEqual(self.request_spec['image'], call_info['image'])
+ self.assertEqual(self.request_spec['security_group'],
+ call_info['security_groups'])
+ self.assertEqual(self.request_spec['block_device_mapping'],
+ call_info['block_device_mapping'])
+
+ def test_build_instances_selects_current_cell(self):
+ # Make sure there's no child cells so that we will be
+ # selected
+ self.state_manager.child_cells = {}
+
+ call_info = {}
+
+ def fake_create_instances_here(ctxt, instance_uuids,
+ instance_properties, instance_type, image, security_groups,
+ block_device_mapping):
+ call_info['ctxt'] = ctxt
+ call_info['instance_uuids'] = instance_uuids
+ call_info['instance_properties'] = instance_properties
+ call_info['instance_type'] = instance_type
+ call_info['image'] = image
+ call_info['security_groups'] = security_groups
+ call_info['block_device_mapping'] = block_device_mapping
+
+ def fake_rpc_build_instances(ctxt, **build_inst_kwargs):
+ call_info['build_inst_kwargs'] = build_inst_kwargs
+
+ def fake_build_request_spec(image, instances):
+ request_spec = {
+ 'instance_uuids': [inst['uuid'] for inst in instances],
+ 'image': image}
+ return request_spec
+
+ self.stubs.Set(self.scheduler, '_create_instances_here',
+ fake_create_instances_here)
+ self.stubs.Set(self.scheduler.compute_task_api,
+ 'build_instances', fake_rpc_build_instances)
+ self.stubs.Set(scheduler_utils, 'build_request_spec',
+ fake_build_request_spec)
+
+ self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
+ self.build_inst_kwargs)
+
+ self.assertEqual(self.ctxt, call_info['ctxt'])
+ self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
+ self.assertEqual(self.build_inst_kwargs['instances'][0],
+ call_info['instance_properties'])
+ self.assertEqual(self.build_inst_kwargs['instance_type'],
+ call_info['instance_type'])
+ self.assertEqual(self.build_inst_kwargs['image'], call_info['image'])
+ self.assertEqual(self.build_inst_kwargs['security_group'],
+ call_info['security_groups'])
+ self.assertEqual(self.build_inst_kwargs['block_device_mapping'],
+ call_info['block_device_mapping'])
+ self.assertEqual(self.build_inst_kwargs,
+ call_info['build_inst_kwargs'])
+ self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
def test_run_instance_retries_when_no_cells_avail(self):
self.flags(scheduler_retries=7, group='cells')
@@ -183,7 +303,7 @@ class CellsSchedulerTestCase(test.TestCase):
call_info = {'num_tries': 0, 'errored_uuids': []}
- def fake_run_instance(message, host_sched_kwargs):
+ def fake_grab_target_cells(filter_properties):
call_info['num_tries'] += 1
raise exception.NoCellsAvailable()
@@ -194,7 +314,8 @@ class CellsSchedulerTestCase(test.TestCase):
self.assertEqual(vm_states.ERROR, values['vm_state'])
call_info['errored_uuids'].append(instance_uuid)
- self.stubs.Set(self.scheduler, '_run_instance', fake_run_instance)
+ self.stubs.Set(self.scheduler, '_grab_target_cells',
+ fake_grab_target_cells)
self.stubs.Set(time, 'sleep', fake_sleep)
self.stubs.Set(db, 'instance_update', fake_instance_update)
@@ -204,17 +325,55 @@ class CellsSchedulerTestCase(test.TestCase):
self.assertEqual(8, call_info['num_tries'])
self.assertEqual(self.instance_uuids, call_info['errored_uuids'])
- def test_run_instance_on_random_exception(self):
+ def test_build_instances_retries_when_no_cells_avail(self):
self.flags(scheduler_retries=7, group='cells')
- host_sched_kwargs = {'request_spec': self.request_spec,
- 'filter_properties': {}}
+ call_info = {'num_tries': 0, 'errored_uuids': []}
+
+ def fake_grab_target_cells(filter_properties):
+ call_info['num_tries'] += 1
+ raise exception.NoCellsAvailable()
+
+ def fake_sleep(_secs):
+ return
+
+ def fake_instance_update(ctxt, instance_uuid, values):
+ self.assertEqual(vm_states.ERROR, values['vm_state'])
+ call_info['errored_uuids'].append(instance_uuid)
+
+ def fake_build_request_spec(image, instances):
+ request_spec = {
+ 'instance_uuids': [inst['uuid'] for inst in instances],
+ 'image': image}
+ return request_spec
+
+ self.stubs.Set(self.scheduler, '_grab_target_cells',
+ fake_grab_target_cells)
+ self.stubs.Set(time, 'sleep', fake_sleep)
+ self.stubs.Set(db, 'instance_update', fake_instance_update)
+ self.stubs.Set(scheduler_utils, 'build_request_spec',
+ fake_build_request_spec)
+
+ self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
+ self.build_inst_kwargs)
+
+ self.assertEqual(8, call_info['num_tries'])
+ self.assertEqual(self.instance_uuids, call_info['errored_uuids'])
+
+ def test_schedule_method_on_random_exception(self):
+ self.flags(scheduler_retries=7, group='cells')
+
+ instances = [{'uuid': uuid} for uuid in self.instance_uuids]
+ method_kwargs = {
+ 'image': 'fake_image',
+ 'instances': instances,
+ 'filter_properties': {}}
call_info = {'num_tries': 0,
'errored_uuids1': [],
'errored_uuids2': []}
- def fake_run_instance(message, host_sched_kwargs):
+ def fake_grab_target_cells(filter_properties):
call_info['num_tries'] += 1
raise test.TestingException()
@@ -226,13 +385,22 @@ class CellsSchedulerTestCase(test.TestCase):
self.assertEqual(vm_states.ERROR, instance['vm_state'])
call_info['errored_uuids2'].append(instance['uuid'])
- self.stubs.Set(self.scheduler, '_run_instance', fake_run_instance)
+ def fake_build_request_spec(image, instances):
+ request_spec = {
+ 'instance_uuids': [inst['uuid'] for inst in instances],
+ 'image': image}
+ return request_spec
+
+ self.stubs.Set(self.scheduler, '_grab_target_cells',
+ fake_grab_target_cells)
self.stubs.Set(db, 'instance_update', fake_instance_update)
self.stubs.Set(self.msg_runner, 'instance_update_at_top',
- fake_instance_update_at_top)
+ fake_instance_update_at_top)
+ self.stubs.Set(scheduler_utils, 'build_request_spec',
+ fake_build_request_spec)
- self.msg_runner.schedule_run_instance(self.ctxt,
- self.my_cell_state, host_sched_kwargs)
+ self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
+ method_kwargs)
# Shouldn't retry
self.assertEqual(1, call_info['num_tries'])
self.assertEqual(self.instance_uuids, call_info['errored_uuids1'])
@@ -252,9 +420,16 @@ class CellsSchedulerTestCase(test.TestCase):
call_info = {}
- def fake_create_instances_here(ctxt, request_spec):
+ def fake_create_instances_here(ctxt, instance_uuids,
+ instance_properties, instance_type, image, security_groups,
+ block_device_mapping):
call_info['ctxt'] = ctxt
- call_info['request_spec'] = request_spec
+ call_info['instance_uuids'] = instance_uuids
+ call_info['instance_properties'] = instance_properties
+ call_info['instance_type'] = instance_type
+ call_info['image'] = image
+ call_info['security_groups'] = security_groups
+ call_info['block_device_mapping'] = block_device_mapping
def fake_rpc_run_instance(ctxt, **host_sched_kwargs):
call_info['host_sched_kwargs'] = host_sched_kwargs
@@ -281,7 +456,16 @@ class CellsSchedulerTestCase(test.TestCase):
self.my_cell_state, host_sched_kwargs)
# Our cell was selected.
self.assertEqual(self.ctxt, call_info['ctxt'])
- self.assertEqual(self.request_spec, call_info['request_spec'])
+ self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
+ self.assertEqual(self.request_spec['instance_properties'],
+ call_info['instance_properties'])
+ self.assertEqual(self.request_spec['instance_type'],
+ call_info['instance_type'])
+ self.assertEqual(self.request_spec['image'], call_info['image'])
+ self.assertEqual(self.request_spec['security_group'],
+ call_info['security_groups'])
+ self.assertEqual(self.request_spec['block_device_mapping'],
+ call_info['block_device_mapping'])
self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs'])
# Filter args are correct
expected_filt_props = {'context': self.ctxt,
@@ -341,9 +525,16 @@ class CellsSchedulerTestCase(test.TestCase):
call_info = {}
- def fake_create_instances_here(ctxt, request_spec):
+ def fake_create_instances_here(ctxt, instance_uuids,
+ instance_properties, instance_type, image, security_groups,
+ block_device_mapping):
call_info['ctxt'] = ctxt
- call_info['request_spec'] = request_spec
+ call_info['instance_uuids'] = instance_uuids
+ call_info['instance_properties'] = instance_properties
+ call_info['instance_type'] = instance_type
+ call_info['image'] = image
+ call_info['security_groups'] = security_groups
+ call_info['block_device_mapping'] = block_device_mapping
def fake_rpc_run_instance(ctxt, **host_sched_kwargs):
call_info['host_sched_kwargs'] = host_sched_kwargs
@@ -370,7 +561,16 @@ class CellsSchedulerTestCase(test.TestCase):
self.my_cell_state, host_sched_kwargs)
# Our cell was selected.
self.assertEqual(self.ctxt, call_info['ctxt'])
- self.assertEqual(self.request_spec, call_info['request_spec'])
+ self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
+ self.assertEqual(self.request_spec['instance_properties'],
+ call_info['instance_properties'])
+ self.assertEqual(self.request_spec['instance_type'],
+ call_info['instance_type'])
+ self.assertEqual(self.request_spec['image'], call_info['image'])
+ self.assertEqual(self.request_spec['security_group'],
+ call_info['security_groups'])
+ self.assertEqual(self.request_spec['block_device_mapping'],
+ call_info['block_device_mapping'])
self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs'])
# Weight args are correct
expected_filt_props = {'context': self.ctxt,
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index 637596fc5..8b397db02 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -1196,6 +1196,41 @@ class _BaseTaskTestCase(object):
self.assertRaises(NotImplementedError, self.conductor.migrate_server,
self.context, None, None, True, False, "dummy", None, None)
+ def test_build_instances(self):
+ instance_type = flavors.get_default_instance_type()
+ system_metadata = flavors.save_instance_type_info({}, instance_type)
+ # NOTE(alaski): instance_type -> system_metadata -> instance_type loses
+ # some data (extra_specs) so we need both for testing.
+ instance_type_extract = flavors.extract_instance_type(
+ {'system_metadata': system_metadata})
+ self.mox.StubOutWithMock(self.conductor_manager.scheduler_rpcapi,
+ 'run_instance')
+ self.conductor_manager.scheduler_rpcapi.run_instance(self.context,
+ request_spec={
+ 'image': {'fake_data': 'should_pass_silently'},
+ 'instance_properties': {'system_metadata': system_metadata,
+ 'uuid': 'fakeuuid'},
+ 'instance_type': instance_type_extract,
+ 'instance_uuids': ['fakeuuid', 'fakeuuid2'],
+ 'block_device_mapping': 'block_device_mapping',
+ 'security_group': 'security_groups'},
+ admin_password='admin_password',
+ injected_files='injected_files',
+ requested_networks='requested_networks', is_first_time=True,
+ filter_properties={})
+ self.mox.ReplayAll()
+ self.conductor.build_instances(self.context,
+ instances=[{'uuid': 'fakeuuid',
+ 'system_metadata': system_metadata},
+ {'uuid': 'fakeuuid2'}],
+ image={'fake_data': 'should_pass_silently'},
+ filter_properties={},
+ admin_password='admin_password',
+ injected_files='injected_files',
+ requested_networks='requested_networks',
+ security_groups='security_groups',
+ block_device_mapping='block_device_mapping')
+
class ConductorTaskTestCase(_BaseTaskTestCase, test.TestCase):
"""ComputeTaskManager Tests."""
diff --git a/nova/tests/fake_utils.py b/nova/tests/fake_utils.py
new file mode 100644
index 000000000..cb73bc8bb
--- /dev/null
+++ b/nova/tests/fake_utils.py
@@ -0,0 +1,28 @@
+# Copyright (c) 2013 Rackspace Hosting
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""This modules stubs out functions in nova.utils."""
+
+from nova import utils
+
+
+def stub_out_utils_spawn_n(stubs):
+ """Stubs out spawn_n with a blocking version.
+
+ This aids testing async processes by blocking until they're done.
+ """
+ def no_spawn(func, *args, **kwargs):
+ return func(*args, **kwargs)
+
+ stubs.Set(utils, 'spawn_n', no_spawn)
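
The stub helps because the conductor's local build_instances path goes through utils.spawn_n; with the blocking replacement installed, the "async" build runs inline and a test can assert on its side effects immediately. The core of what the stub installs, as a tiny standalone sketch:

    def no_spawn(func, *args, **kwargs):
        """What the stub puts in place of utils.spawn_n: run inline."""
        return func(*args, **kwargs)


    results = []
    no_spawn(results.append, 'built')   # the "async" work happens right now
    assert results == ['built']         # assertable without sleeping/polling
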
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index cfd1877f5..c3895678c 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -54,6 +54,7 @@ from nova.tests.api.openstack.compute.contrib import test_services
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance_actions
from nova.tests import fake_network
+from nova.tests import fake_utils
from nova.tests.image import fake
from nova.tests.integrated import integrated_helpers
from nova.tests import utils as test_utils
@@ -95,6 +96,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
super(ApiSampleTestBase, self).setUp()
self.useFixture(test.SampleNetworks())
fake_network.stub_compute_with_ips(self.stubs)
+ fake_utils.stub_out_utils_spawn_n(self.stubs)
self.generate_samples = os.getenv('GENERATE_SAMPLES') is not None
def _pretty_data(self, data):
diff --git a/nova/utils.py b/nova/utils.py
index 94c425cc1..f54e72d63 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -37,6 +37,7 @@ import tempfile
import time
from xml.sax import saxutils
+import eventlet
import netaddr
from oslo.config import cfg
@@ -1101,3 +1102,12 @@ def check_string_length(value, name, min_length=0, max_length=None):
msg = _("%(name)s has more than %(max_length)s "
"characters.") % locals()
raise exception.InvalidInput(message=msg)
+
+
+def spawn_n(func, *args, **kwargs):
+ """Passthrough method for eventlet.spawn_n.
+
+ This utility exists so that it can be stubbed for testing without
+ interfering with the service spawns.
+ """
+ eventlet.spawn_n(func, *args, **kwargs)
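
The new nova.utils.spawn_n is deliberately a one-line seam: callers such as LocalComputeTaskAPI.build_instances go through it rather than calling eventlet.spawn_n directly, so tests can swap that single attribute without touching the service launchers. A minimal usage sketch, assuming eventlet is installed:

    import eventlet


    def spawn_n(func, *args, **kwargs):
        """Passthrough seam so tests can stub async behaviour in one place."""
        eventlet.spawn_n(func, *args, **kwargs)


    def do_build(name):
        print('building %s' % name)


    spawn_n(do_build, 'instance-1')   # fire-and-forget green thread
    eventlet.sleep(0)                 # yield so the green thread actually runs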