| author | Chris Behrens <cbehrens@codestud.com> | 2011-06-29 23:02:53 +0000 |
|---|---|---|
| committer | Tarmac <> | 2011-06-29 23:02:53 +0000 |
| commit | d77526e357fe3764107d4d5fda768b69b52fb015 (patch) | |
| tree | f21bb20974027103bd694e2ebb4d4ec425914047 | |
| parent | 17d08b9644080265f6feb4d55825de8e4049c991 (diff) | |
| parent | 7555aca28a5ab1ba4dd1be04a91bf6347eaac84f (diff) | |
compute_api.get_all should be able to recurse zones (bug 744217).
Also, allow building more than one instance at once with zone_aware_scheduler types.
Other cleanups related to the zone-aware scheduler.
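
As a quick orientation before the diff, here is roughly how the new listing filters are meant to flow from the OS API query string into `compute_api.get_all`. This is a minimal sketch: the `list_servers` helper and `query_params` dict are illustrative, while the parameter names match the servers.py hunk below.

```python
from nova import utils


def list_servers(compute_api, context, query_params):
    # recurse_zones arrives as a string ('True', '1', ...); the new
    # utils.bool_from_str() helper turns it into a real boolean.
    recurse_zones = utils.bool_from_str(query_params.get('recurse_zones'))
    return compute_api.get_all(
        context,
        reservation_id=query_params.get('reservation_id'),
        project_id=query_params.get('project_id'),
        fixed_ip=query_params.get('fixed_ip'),
        recurse_zones=recurse_zones)
```

Per the compute/api.py hunk, passing a reservation_id forces recursion into child zones, and instances fetched from child zones via novaclient are tagged `_is_precooked` so the API layer knows not to rebuild them.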
| -rw-r--r-- | nova/api/openstack/create_instance_helper.py | 13 |
| -rw-r--r-- | nova/api/openstack/servers.py | 24 |
| -rw-r--r-- | nova/compute/api.py | 84 |
| -rw-r--r-- | nova/scheduler/api.py | 49 |
| -rw-r--r-- | nova/scheduler/host_filter.py | 8 |
| -rw-r--r-- | nova/scheduler/least_cost.py | 46 |
| -rw-r--r-- | nova/scheduler/zone_aware_scheduler.py | 115 |
| -rw-r--r-- | nova/tests/scheduler/test_least_cost_scheduler.py | 11 |
| -rw-r--r-- | nova/tests/scheduler/test_scheduler.py | 4 |
| -rw-r--r-- | nova/tests/scheduler/test_zone_aware_scheduler.py | 39 |
| -rw-r--r-- | nova/tests/test_utils.py | 13 |
| -rw-r--r-- | nova/utils.py | 11 |
| -rw-r--r-- | tools/pip-requires | 2 |
13 files changed, 291 insertions, 128 deletions
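
The heart of the multi-instance change is the new selection loop in nova/scheduler/zone_aware_scheduler.py. Below is a condensed sketch of that loop for local hosts only; the `select_local` name is illustrative, and the real `ZoneAwareScheduler.select()` additionally appends weights from child zones and sorts the combined list.

```python
import operator


def select_local(scheduler, topic, request_spec):
    """Pick one weighted host per requested instance (sketch only)."""
    num_instances = request_spec.get('num_instances', 1)
    instance_type = request_spec['instance_type']
    weighted = []
    host_list = None
    for i in xrange(num_instances):
        # First pass: host_list is None, so filter_hosts() builds the
        # full (hostname, capabilities) list from the ZoneManager.
        # Later passes reuse the list, whose capabilities have already
        # been reduced by consume_resources().
        host_list = scheduler.filter_hosts(topic, request_spec, host_list)
        if not host_list:
            break
        weights = scheduler.weigh_hosts(topic, request_spec, host_list)
        weights.sort(key=operator.itemgetter('weight'))
        best = weights[0]
        weighted.append(best)
        scheduler.consume_resources(topic, best['capabilities'],
                                    instance_type)
    return weighted
```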
diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 436e524c1..1066713a3 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -114,6 +114,15 @@ class CreateInstanceHelper(object): name = name.strip() reservation_id = body['server'].get('reservation_id') + min_count = body['server'].get('min_count') + max_count = body['server'].get('max_count') + # min_count and max_count are optional. If they exist, they come + # in as strings. We want to default 'min_count' to 1, and default + # 'max_count' to be 'min_count'. + min_count = int(min_count) if min_count else 1 + max_count = int(max_count) if max_count else min_count + if min_count > max_count: + min_count = max_count try: inst_type = \ @@ -137,7 +146,9 @@ class CreateInstanceHelper(object): injected_files=injected_files, admin_password=password, zone_blob=zone_blob, - reservation_id=reservation_id)) + reservation_id=reservation_id, + min_count=min_count, + max_count=max_count)) except quota.QuotaError as error: self._handle_quota_error(error) except exception.ImageNotFound as error: diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index b82a6de19..fc1ab8d46 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -76,10 +76,17 @@ class Controller(object): builder - the response model builder """ - reservation_id = req.str_GET.get('reservation_id') + query_str = req.str_GET + reservation_id = query_str.get('reservation_id') + project_id = query_str.get('project_id') + fixed_ip = query_str.get('fixed_ip') + recurse_zones = utils.bool_from_str(query_str.get('recurse_zones')) instance_list = self.compute_api.get_all( - req.environ['nova.context'], - reservation_id=reservation_id) + req.environ['nova.context'], + reservation_id=reservation_id, + project_id=project_id, + fixed_ip=fixed_ip, + recurse_zones=recurse_zones) limited_list = self._limit_items(instance_list, req) builder = self._get_view_builder(req) servers = [builder.build(inst, is_detail)['server'] @@ -111,14 +118,15 @@ class Controller(object): extra_values = None result = None try: - extra_values, result = self.helper.create_instance( - req, body, self.compute_api.create) + extra_values, instances = self.helper.create_instance( + req, body, self.compute_api.create) except faults.Fault, f: return f - instances = result - - (inst, ) = instances + # We can only return 1 instance via the API, if we happen to + # build more than one... instances is a list, so we'll just + # use the first one.. 
+ inst = instances[0] for key in ['instance_type', 'image_ref']: inst[key] = extra_values[key] diff --git a/nova/compute/api.py b/nova/compute/api.py index 59a392e7a..e7d9f1bc3 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -143,7 +143,7 @@ class API(base.Base): def _check_create_parameters(self, context, instance_type, image_href, kernel_id=None, ramdisk_id=None, - min_count=1, max_count=1, + min_count=None, max_count=None, display_name='', display_description='', key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, @@ -154,6 +154,10 @@ class API(base.Base): if not instance_type: instance_type = instance_types.get_default_instance_type() + if not min_count: + min_count = 1 + if not max_count: + max_count = min_count num_instances = quota.allowed_instances(context, max_count, instance_type) @@ -338,7 +342,7 @@ class API(base.Base): def create_all_at_once(self, context, instance_type, image_href, kernel_id=None, ramdisk_id=None, - min_count=1, max_count=1, + min_count=None, max_count=None, display_name='', display_description='', key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, @@ -368,7 +372,7 @@ class API(base.Base): def create(self, context, instance_type, image_href, kernel_id=None, ramdisk_id=None, - min_count=1, max_count=1, + min_count=None, max_count=None, display_name='', display_description='', key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, @@ -613,50 +617,60 @@ class API(base.Base): """ return self.get(context, instance_id) - def get_all_across_zones(self, context, reservation_id): - """Get all instances with this reservation_id, across - all available Zones (if any). - """ - context = context.elevated() - instances = self.db.instance_get_all_by_reservation( - context, reservation_id) - - children = scheduler_api.call_zone_method(context, "list", - novaclient_collection_name="servers", - reservation_id=reservation_id) - - for zone, servers in children: - for server in servers: - # Results are ready to send to user. No need to scrub. - server._info['_is_precooked'] = True - instances.append(server._info) - return instances - def get_all(self, context, project_id=None, reservation_id=None, - fixed_ip=None): + fixed_ip=None, recurse_zones=False): """Get all instances filtered by one of the given parameters. If there is no filter and the context is an admin, it will retreive all instances in the system. 
""" - if reservation_id is not None: - return self.get_all_across_zones(context, reservation_id) - if fixed_ip is not None: - return self.db.fixed_ip_get_instance(context, fixed_ip) - - if project_id or not context.is_admin: + if reservation_id is not None: + recurse_zones = True + instances = self.db.instance_get_all_by_reservation( + context, reservation_id) + elif fixed_ip is not None: + try: + instances = self.db.fixed_ip_get_instance(context, fixed_ip) + except exception.FloatingIpNotFound, e: + if not recurse_zones: + raise + instances = None + elif project_id or not context.is_admin: if not context.project: - return self.db.instance_get_all_by_user( + instances = self.db.instance_get_all_by_user( context, context.user_id) + else: + if project_id is None: + project_id = context.project_id + instances = self.db.instance_get_all_by_project( + context, project_id) + else: + instances = self.db.instance_get_all(context) + + if instances is None: + instances = [] + elif not isinstance(instances, list): + instances = [instances] - if project_id is None: - project_id = context.project_id + if not recurse_zones: + return instances - return self.db.instance_get_all_by_project( - context, project_id) + admin_context = context.elevated() + children = scheduler_api.call_zone_method(admin_context, + "list", + novaclient_collection_name="servers", + reservation_id=reservation_id, + project_id=project_id, + fixed_ip=fixed_ip, + recurse_zones=True) - return self.db.instance_get_all(context) + for zone, servers in children: + for server in servers: + # Results are ready to send to user. No need to scrub. + server._info['_is_precooked'] = True + instances.append(server._info) + return instances def _cast_compute_message(self, method, context, instance_id, host=None, params=None): diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py index 1bb047e2e..5d62beb86 100644 --- a/nova/scheduler/api.py +++ b/nova/scheduler/api.py @@ -162,32 +162,53 @@ def child_zone_helper(zone_list, func): _wrap_method(_process, func), zone_list)] -def _issue_novaclient_command(nova, zone, collection, method_name, item_id): +def _issue_novaclient_command(nova, zone, collection, + method_name, *args, **kwargs): """Use novaclient to issue command to a single child zone. - One of these will be run in parallel for each child zone.""" + One of these will be run in parallel for each child zone. + """ manager = getattr(nova, collection) - result = None - try: + + # NOTE(comstud): This is not ideal, but we have to do this based on + # how novaclient is implemented right now. + # 'find' is special cased as novaclient requires kwargs for it to + # filter on a 'get_all'. + # Every other method first needs to do a 'get' on the first argument + # passed, which should be a UUID. If it's 'get' itself that we want, + # we just return the result. Otherwise, we next call the real method + # that's wanted... passing other arguments that may or may not exist. 
+ if method_name in ['find', 'findall']: try: - result = manager.get(int(item_id)) - except ValueError, e: - result = manager.find(name=item_id) + return getattr(manager, method_name)(**kwargs) + except novaclient.NotFound: + url = zone.api_url + LOG.debug(_("%(collection)s.%(method_name)s didn't find " + "anything matching '%(kwargs)s' on '%(url)s'" % + locals())) + return None + + args = list(args) + # pop off the UUID to look up + item = args.pop(0) + try: + result = manager.get(item) except novaclient.NotFound: url = zone.api_url - LOG.debug(_("%(collection)s '%(item_id)s' not found on '%(url)s'" % + LOG.debug(_("%(collection)s '%(item)s' not found on '%(url)s'" % locals())) return None - if method_name.lower() not in ['get', 'find']: - result = getattr(result, method_name)() + if method_name.lower() != 'get': + # if we're doing something other than 'get', call it passing args. + result = getattr(result, method_name)(*args, **kwargs) return result -def wrap_novaclient_function(f, collection, method_name, item_id): - """Appends collection, method_name and item_id to the incoming +def wrap_novaclient_function(f, collection, method_name, *args, **kwargs): + """Appends collection, method_name and arguments to the incoming (nova, zone) call from child_zone_helper.""" def inner(nova, zone): - return f(nova, zone, collection, method_name, item_id) + return f(nova, zone, collection, method_name, *args, **kwargs) return inner @@ -220,7 +241,7 @@ class reroute_compute(object): the wrapped method. (This ensures that zone-local code can continue to use integer IDs). - 4. If the item was not found, we delgate the call to a child zone + 4. If the item was not found, we delegate the call to a child zone using the UUID. """ def __init__(self, method_name): diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 5f61f9456..967d3db64 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -329,8 +329,9 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): 'instance_type': <InstanceType dict>} """ - def filter_hosts(self, num, request_spec): + def filter_hosts(self, topic, request_spec, hosts=None): """Filter the full host list (from the ZoneManager)""" + filter_name = request_spec.get('filter', None) host_filter = choose_host_filter(filter_name) @@ -341,8 +342,9 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): name, query = host_filter.instance_type_to_filter(instance_type) return host_filter.filter_hosts(self.zone_manager, query) - def weigh_hosts(self, num, request_spec, hosts): + def weigh_hosts(self, topic, request_spec, hosts): """Derived classes must override this method and return a lists of hosts in [{weight, hostname}] format. 
""" - return [dict(weight=1, hostname=host) for host, caps in hosts] + return [dict(weight=1, hostname=hostname, capabilities=caps) + for hostname, caps in hosts] diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py index 629fe2e42..6f5eb66fd 100644 --- a/nova/scheduler/least_cost.py +++ b/nova/scheduler/least_cost.py @@ -48,25 +48,43 @@ def noop_cost_fn(host): return 1 -flags.DEFINE_integer('fill_first_cost_fn_weight', 1, +flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1, 'How much weight to give the fill-first cost function') -def fill_first_cost_fn(host): +def compute_fill_first_cost_fn(host): """Prefer hosts that have less ram available, filter_hosts will exclude hosts that don't have enough ram""" hostname, caps = host - free_mem = caps['compute']['host_memory_free'] + free_mem = caps['host_memory_free'] return free_mem class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): - def get_cost_fns(self): + def __init__(self, *args, **kwargs): + self.cost_fns_cache = {} + super(LeastCostScheduler, self).__init__(*args, **kwargs) + + def get_cost_fns(self, topic): """Returns a list of tuples containing weights and cost functions to use for weighing hosts """ + + if topic in self.cost_fns_cache: + return self.cost_fns_cache[topic] + cost_fns = [] for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions: + if '.' in cost_fn_str: + short_name = cost_fn_str.split('.')[-1] + else: + short_name = cost_fn_str + cost_fn_str = "%s.%s.%s" % ( + __name__, self.__class__.__name__, short_name) + + if not (short_name.startswith('%s_' % topic) or + short_name.startswith('noop')): + continue try: # NOTE(sirp): import_class is somewhat misnamed since it can @@ -84,23 +102,23 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): cost_fns.append((weight, cost_fn)) + self.cost_fns_cache[topic] = cost_fns return cost_fns - def weigh_hosts(self, num, request_spec, hosts): + def weigh_hosts(self, topic, request_spec, hosts): """Returns a list of dictionaries of form: - [ {weight: weight, hostname: hostname} ]""" - - # FIXME(sirp): weigh_hosts should handle more than just instances - hostnames = [hostname for hostname, caps in hosts] + [ {weight: weight, hostname: hostname, capabilities: capabs} ] + """ - cost_fns = self.get_cost_fns() + cost_fns = self.get_cost_fns(topic) costs = weighted_sum(domain=hosts, weighted_fns=cost_fns) weighted = [] weight_log = [] - for cost, hostname in zip(costs, hostnames): + for cost, (hostname, caps) in zip(costs, hosts): weight_log.append("%s: %s" % (hostname, "%.2f" % cost)) - weight_dict = dict(weight=cost, hostname=hostname) + weight_dict = dict(weight=cost, hostname=hostname, + capabilities=caps) weighted.append(weight_dict) LOG.debug(_("Weighted Costs => %s") % weight_log) @@ -127,7 +145,8 @@ def weighted_sum(domain, weighted_fns, normalize=True): weighted_fns - list of weights and functions like: [(weight, objective-functions)] - Returns an unsorted of scores. To pair with hosts do: zip(scores, hosts) + Returns an unsorted list of scores. 
To pair with hosts do: + zip(scores, hosts) """ # Table of form: # { domain1: [score1, score2, ..., scoreM] @@ -150,7 +169,6 @@ def weighted_sum(domain, weighted_fns, normalize=True): domain_scores = [] for idx in sorted(score_table): elem_score = sum(score_table[idx]) - elem = domain[idx] domain_scores.append(elem_score) return domain_scores diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index e7bff2faa..9e4fc47d1 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -180,18 +180,22 @@ class ZoneAwareScheduler(driver.Scheduler): request_spec, kwargs) return None + num_instances = request_spec.get('num_instances', 1) + LOG.debug(_("Attempting to build %(num_instances)d instance(s)") % + locals()) + # Create build plan and provision ... build_plan = self.select(context, request_spec) if not build_plan: raise driver.NoValidHost(_('No hosts were available')) - for num in xrange(request_spec['num_instances']): + for num in xrange(num_instances): if not build_plan: break - item = build_plan.pop(0) - self._provision_resource(context, item, instance_id, request_spec, - kwargs) + build_plan_item = build_plan.pop(0) + self._provision_resource(context, build_plan_item, instance_id, + request_spec, kwargs) # Returning None short-circuits the routing to Compute (since # we've already done it here) @@ -224,18 +228,36 @@ class ZoneAwareScheduler(driver.Scheduler): raise NotImplemented(_("Zone Aware Scheduler only understands " "Compute nodes (for now)")) - #TODO(sandy): how to infer this from OS API params? - num_instances = 1 - - # Filter local hosts based on requirements ... - host_list = self.filter_hosts(num_instances, request_spec) + num_instances = request_spec.get('num_instances', 1) + instance_type = request_spec['instance_type'] - # TODO(sirp): weigh_hosts should also be a function of 'topic' or - # resources, so that we can apply different objective functions to it + weighted = [] + host_list = None + + for i in xrange(num_instances): + # Filter local hosts based on requirements ... + # + # The first pass through here will pass 'None' as the + # host_list.. which tells the filter to build the full + # list of hosts. + # On a 2nd pass, the filter can modify the host_list with + # any updates it needs to make based on resources that + # may have been consumed from a previous build.. + host_list = self.filter_hosts(topic, request_spec, host_list) + if not host_list: + LOG.warn(_("Filter returned no hosts after processing " + "%(i)d of %(num_instances)d instances") % locals()) + break - # then weigh the selected hosts. - # weighted = [{weight=weight, name=hostname}, ...] - weighted = self.weigh_hosts(num_instances, request_spec, host_list) + # then weigh the selected hosts. + # weighted = [{weight=weight, hostname=hostname, + # capabilities=capabs}, ...] + weights = self.weigh_hosts(topic, request_spec, host_list) + weights.sort(key=operator.itemgetter('weight')) + best_weight = weights[0] + weighted.append(best_weight) + self.consume_resources(topic, best_weight['capabilities'], + instance_type) # Next, tack on the best weights from the child zones ... json_spec = json.dumps(request_spec) @@ -254,18 +276,65 @@ class ZoneAwareScheduler(driver.Scheduler): weighted.sort(key=operator.itemgetter('weight')) return weighted - def filter_hosts(self, num, request_spec): - """Derived classes must override this method and return - a list of hosts in [(hostname, capability_dict)] format. 
+ def compute_filter(self, hostname, capabilities, request_spec): + """Return whether or not we can schedule to this compute node. + Derived classes should override this and return True if the host + is acceptable for scheduling. """ - # NOTE(sirp): The default logic is the equivalent to AllHostsFilter - service_states = self.zone_manager.service_states - return [(host, services) - for host, services in service_states.iteritems()] + instance_type = request_spec['instance_type'] + requested_mem = instance_type['memory_mb'] * 1024 * 1024 + return capabilities['host_memory_free'] >= requested_mem + + def filter_hosts(self, topic, request_spec, host_list=None): + """Return a list of hosts which are acceptable for scheduling. + Return value should be a list of (hostname, capability_dict)s. + Derived classes may override this, but may find the + '<topic>_filter' function more appropriate. + """ + + def _default_filter(self, hostname, capabilities, request_spec): + """Default filter function if there's no <topic>_filter""" + # NOTE(sirp): The default logic is the equivalent to + # AllHostsFilter + return True - def weigh_hosts(self, num, request_spec, hosts): + filter_func = getattr(self, '%s_filter' % topic, _default_filter) + + if host_list is None: + first_run = True + host_list = self.zone_manager.service_states.iteritems() + else: + first_run = False + + filtered_hosts = [] + for host, services in host_list: + if first_run: + if topic not in services: + continue + services = services[topic] + if filter_func(host, services, request_spec): + filtered_hosts.append((host, services)) + return filtered_hosts + + def weigh_hosts(self, topic, request_spec, hosts): """Derived classes may override this to provide more sophisticated scheduling objectives """ # NOTE(sirp): The default logic is the same as the NoopCostFunction - return [dict(weight=1, hostname=host) for host, caps in hosts] + return [dict(weight=1, hostname=hostname, capabilities=capabilities) + for hostname, capabilities in hosts] + + def compute_consume(self, capabilities, instance_type): + """Consume compute resources for selected host""" + + requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024 + capabilities['host_memory_free'] -= requested_mem + + def consume_resources(self, topic, capabilities, instance_type): + """Consume resources for a specific host. 
'host' is a tuple + of the hostname and the services""" + + consume_func = getattr(self, '%s_consume' % topic, None) + if not consume_func: + return + consume_func(capabilities, instance_type) diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py index 9a5318aee..49791053e 100644 --- a/nova/tests/scheduler/test_least_cost_scheduler.py +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -122,15 +122,16 @@ class LeastCostSchedulerTestCase(test.TestCase): for hostname, caps in hosts] self.assertWeights(expected, num, request_spec, hosts) - def test_fill_first_cost_fn(self): + def test_compute_fill_first_cost_fn(self): FLAGS.least_cost_scheduler_cost_functions = [ - 'nova.scheduler.least_cost.fill_first_cost_fn', + 'nova.scheduler.least_cost.compute_fill_first_cost_fn', ] - FLAGS.fill_first_cost_fn_weight = 1 + FLAGS.compute_fill_first_cost_fn_weight = 1 num = 1 - request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) + instance_type = {'memory_mb': 1024} + request_spec = {'instance_type': instance_type} + hosts = self.sched.filter_hosts('compute', request_spec, None) expected = [] for idx, (hostname, caps) in enumerate(hosts): diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 4be59d411..fea8b424d 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -1074,7 +1074,7 @@ class DynamicNovaClientTest(test.TestCase): self.assertEquals(api._issue_novaclient_command( FakeNovaClient(FakeServerCollection()), - zone, "servers", "find", "name").b, 22) + zone, "servers", "find", name="test").b, 22) self.assertEquals(api._issue_novaclient_command( FakeNovaClient(FakeServerCollection()), @@ -1088,7 +1088,7 @@ class DynamicNovaClientTest(test.TestCase): self.assertEquals(api._issue_novaclient_command( FakeNovaClient(FakeEmptyServerCollection()), - zone, "servers", "find", "name"), None) + zone, "servers", "find", name="test"), None) self.assertEquals(api._issue_novaclient_command( FakeNovaClient(FakeEmptyServerCollection()), diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py index 37c6488cc..32f5150a5 100644 --- a/nova/tests/scheduler/test_zone_aware_scheduler.py +++ b/nova/tests/scheduler/test_zone_aware_scheduler.py @@ -55,29 +55,21 @@ def fake_zone_manager_service_states(num_hosts): class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): - def filter_hosts(self, num, specs): - # NOTE(sirp): this is returning [(hostname, services)] - return self.zone_manager.service_states.items() - - def weigh_hosts(self, num, specs, hosts): - fake_weight = 99 - weighted = [] - for hostname, caps in hosts: - weighted.append(dict(weight=fake_weight, name=hostname)) - return weighted + # No need to stub anything at the moment + pass class FakeZoneManager(zone_manager.ZoneManager): def __init__(self): self.service_states = { 'host1': { - 'compute': {'ram': 1000}, + 'compute': {'host_memory_free': 1073741824}, }, 'host2': { - 'compute': {'ram': 2000}, + 'compute': {'host_memory_free': 2147483648}, }, 'host3': { - 'compute': {'ram': 3000}, + 'compute': {'host_memory_free': 3221225472}, }, } @@ -154,8 +146,8 @@ class ZoneAwareSchedulerTestCase(test.TestCase): def test_zone_aware_scheduler(self): """ - Create a nested set of FakeZones, ensure that a select call returns the - appropriate build plan. 
+ Create a nested set of FakeZones, try to build multiple instances + and ensure that a select call returns the appropriate build plan. """ sched = FakeZoneAwareScheduler() self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) @@ -164,13 +156,17 @@ class ZoneAwareSchedulerTestCase(test.TestCase): sched.set_zone_manager(zm) fake_context = {} - build_plan = sched.select(fake_context, {}) + build_plan = sched.select(fake_context, + {'instance_type': {'memory_mb': 512}, + 'num_instances': 4}) - self.assertEqual(15, len(build_plan)) + # 4 from local zones, 12 from remotes + self.assertEqual(16, len(build_plan)) - hostnames = [plan_item['name'] - for plan_item in build_plan if 'name' in plan_item] - self.assertEqual(3, len(hostnames)) + hostnames = [plan_item['hostname'] + for plan_item in build_plan if 'hostname' in plan_item] + # 4 local hosts + self.assertEqual(4, len(hostnames)) def test_empty_zone_aware_scheduler(self): """ @@ -185,8 +181,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase): fake_context = {} self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, fake_context, 1, - dict(host_filter=None, - request_spec={'instance_type': {}})) + dict(host_filter=None, instance_type={})) def test_schedule_do_not_schedule_with_hint(self): """ diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py index 3a3f914e4..0c359e981 100644 --- a/nova/tests/test_utils.py +++ b/nova/tests/test_utils.py @@ -276,6 +276,19 @@ class GenericUtilsTestCase(test.TestCase): result = utils.parse_server_string('www.exa:mple.com:8443') self.assertEqual(('', ''), result) + def test_bool_from_str(self): + self.assertTrue(utils.bool_from_str('1')) + self.assertTrue(utils.bool_from_str('2')) + self.assertTrue(utils.bool_from_str('-1')) + self.assertTrue(utils.bool_from_str('true')) + self.assertTrue(utils.bool_from_str('True')) + self.assertTrue(utils.bool_from_str('tRuE')) + self.assertFalse(utils.bool_from_str('False')) + self.assertFalse(utils.bool_from_str('false')) + self.assertFalse(utils.bool_from_str('0')) + self.assertFalse(utils.bool_from_str(None)) + self.assertFalse(utils.bool_from_str('junk')) + class IsUUIDLikeTestCase(test.TestCase): def assertUUIDLike(self, val, expected): diff --git a/nova/utils.py b/nova/utils.py index 510cdd9f6..be26899ca 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -772,6 +772,17 @@ def is_uuid_like(val): return (len(val) == 36) and (val.count('-') == 4) +def bool_from_str(val): + """Convert a string representation of a bool into a bool value""" + + if not val: + return False + try: + return True if int(val) else False + except ValueError: + return val.lower() == 'true' + + class Bootstrapper(object): """Provides environment bootstrapping capabilities for entry points.""" diff --git a/tools/pip-requires b/tools/pip-requires index 6e686b7e7..dec93c351 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -9,7 +9,7 @@ boto==1.9b carrot==0.10.5 eventlet lockfile==0.8 -python-novaclient==2.5.3 +python-novaclient==2.5.7 python-daemon==1.5.5 python-gflags==1.3 redis==2.0.0 |
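
For reference, the semantics of the new `utils.bool_from_str()` helper as exercised by the new tests above; this is a standalone copy for illustration, not additional code in the patch.

```python
def bool_from_str(val):
    """Convert a string representation of a bool into a bool value."""
    if not val:
        return False
    try:
        # Numeric strings: anything non-zero is True.
        return True if int(val) else False
    except ValueError:
        # Non-numeric strings: only a case-insensitive 'true' is True.
        return val.lower() == 'true'

# Truthy: '1', '2', '-1', 'true', 'True', 'tRuE'
# Falsy:  '0', 'false', 'False', 'junk', None
```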
