| author | Jenkins <jenkins@review.openstack.org> | 2013-05-03 09:02:54 +0000 |
|---|---|---|
| committer | Gerrit Code Review <review@openstack.org> | 2013-05-03 09:02:54 +0000 |
| commit | 00daebb7d12e6b78cc77f9d6eaaa19e940b21b38 (patch) | |
| tree | 24d07b1711e5ab4c92890da5f559c392810455c7 | |
| parent | 2299d37be96630108672d733de14df9b605d7e05 (diff) | |
| parent | c42fd6de5903cdb7c5519bd773e0859767f68043 (diff) | |
| download | nova-00daebb7d12e6b78cc77f9d6eaaa19e940b21b38.tar.gz nova-00daebb7d12e6b78cc77f9d6eaaa19e940b21b38.tar.xz nova-00daebb7d12e6b78cc77f9d6eaaa19e940b21b38.zip | |
Merge "Add force_nodes to filter properties"
| -rw-r--r-- | nova/compute/api.py | 28 |
| -rw-r--r-- | nova/scheduler/host_manager.py | 66 |
| -rw-r--r-- | nova/tests/scheduler/test_host_manager.py | 97 |
3 files changed, 164 insertions, 27 deletions
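The patch below extends the legacy availability-zone hint: besides the old `az:host` form, a request may pin a specific node with `az:host:node`, or leave host selection to the host manager with `az::node`, and the scheduler gains a matching `force_nodes` filter property. As a standalone sketch of the parsing rules added to `_handle_availability_zone` (the `parse_az` name, the `default_zone` parameter, and the plain `ValueError` are illustrative, not part of the patch):

```python
def parse_az(availability_zone, default_zone=None):
    """Split an 'az[:host[:node]]' or 'az::node' hint into its parts."""
    forced_host = None
    forced_node = None
    if availability_zone and ':' in availability_zone:
        c = availability_zone.count(':')
        if c == 1:
            availability_zone, forced_host = availability_zone.split(':')
        elif c == 2:
            if '::' in availability_zone:
                # 'az::node': leave host selection to the host manager
                availability_zone, forced_node = availability_zone.split('::')
            else:
                # 'az:host:node': pin both the host and the node
                availability_zone, forced_host, forced_node = \
                    availability_zone.split(':')
        else:
            raise ValueError("Unable to parse availability_zone")
    if not availability_zone:
        availability_zone = default_zone
    return availability_zone, forced_host, forced_node


print(parse_az('nova'))                  # ('nova', None, None)
print(parse_az('nova:compute1'))         # ('nova', 'compute1', None)
print(parse_az('nova:compute1:node-3'))  # ('nova', 'compute1', 'node-3')
print(parse_az('nova::node-3'))          # ('nova', None, 'node-3')
```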
diff --git a/nova/compute/api.py b/nova/compute/api.py
index f4560bd0f..454fd90d9 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -403,17 +403,32 @@ class API(base.Base):
     @staticmethod
     def _handle_availability_zone(availability_zone):
         # NOTE(vish): We have a legacy hack to allow admins to specify hosts
-        #             via az using az:host. It might be nice to expose an
+        #             via az using az:host:node. It might be nice to expose an
         #             api to specify specific hosts to force onto, but for
         #             now it just supports this legacy hack.
+        # NOTE(deva): It is also possible to specify az::node, in which case
+        #             the host manager will determine the correct host.
         forced_host = None
+        forced_node = None
         if availability_zone and ':' in availability_zone:
-            availability_zone, forced_host = availability_zone.split(':')
+            c = availability_zone.count(':')
+            if c == 1:
+                availability_zone, forced_host = availability_zone.split(':')
+            elif c == 2:
+                if '::' in availability_zone:
+                    availability_zone, forced_node = \
+                            availability_zone.split('::')
+                else:
+                    availability_zone, forced_host, forced_node = \
+                            availability_zone.split(':')
+            else:
+                raise exception.InvalidInput(
+                        reason="Unable to parse availability_zone")
 
         if not availability_zone:
             availability_zone = CONF.default_schedule_zone
 
-        return availability_zone, forced_host
+        return availability_zone, forced_host, forced_node
 
     @staticmethod
     def _inherit_properties_from_image(image, auto_disk_config):
@@ -562,8 +577,8 @@ class API(base.Base):
         root_device_name = block_device.properties_root_device_name(
             image.get('properties', {}))
 
-        availability_zone, forced_host = self._handle_availability_zone(
-            availability_zone)
+        availability_zone, forced_host, forced_node = \
+                self._handle_availability_zone(availability_zone)
 
         system_metadata = instance_types.save_instance_type_info(
             dict(), instance_type)
@@ -611,6 +626,9 @@ class API(base.Base):
         if forced_host:
             check_policy(context, 'create:forced_host', {})
             filter_properties['force_hosts'] = [forced_host]
+        if forced_node:
+            check_policy(context, 'create:forced_host', {})
+            filter_properties['force_nodes'] = [forced_node]
 
         for i in xrange(num_instances):
             options = base_options.copy()
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index d12f15f38..9dbe6bd67 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -300,40 +300,66 @@ class HostManager(object):
         def _strip_ignore_hosts(host_map, hosts_to_ignore):
             ignored_hosts = []
             for host in hosts_to_ignore:
-                if host in host_map:
-                    del host_map[host]
-                    ignored_hosts.append(host)
+                for (hostname, nodename) in host_map.keys():
+                    if host == hostname:
+                        del host_map[(hostname, nodename)]
+                        ignored_hosts.append(host)
             ignored_hosts_str = ', '.join(ignored_hosts)
-            msg = _('Host filter ignoring hosts: %(ignored_hosts_str)s')
-            LOG.debug(msg, locals())
+            msg = _('Host filter ignoring hosts: %s')
+            LOG.debug(msg % ignored_hosts_str)
 
         def _match_forced_hosts(host_map, hosts_to_force):
-            for host in host_map.keys():
-                if host not in hosts_to_force:
-                    del host_map[host]
-            if not host_map:
+            forced_hosts = []
+            for (hostname, nodename) in host_map.keys():
+                if hostname not in hosts_to_force:
+                    del host_map[(hostname, nodename)]
+                else:
+                    forced_hosts.append(hostname)
+            if host_map:
+                forced_hosts_str = ', '.join(forced_hosts)
+                msg = _('Host filter forcing available hosts to %s')
+            else:
                 forced_hosts_str = ', '.join(hosts_to_force)
-                msg = _("No hosts matched due to not matching 'force_hosts'"
-                        "value of '%(forced_hosts_str)s'")
-                LOG.debug(msg, locals())
-                return
-            forced_hosts_str = ', '.join(host_map.iterkeys())
-            msg = _('Host filter forcing available hosts to '
-                    '%(forced_hosts_str)s')
-            LOG.debug(msg, locals())
+                msg = _("No hosts matched due to not matching "
+                        "'force_hosts' value of '%s'")
+            LOG.debug(msg % forced_hosts_str)
+
+        def _match_forced_nodes(host_map, nodes_to_force):
+            forced_nodes = []
+            for (hostname, nodename) in host_map.keys():
+                if nodename not in nodes_to_force:
+                    del host_map[(hostname, nodename)]
+                else:
+                    forced_nodes.append(nodename)
+            if host_map:
+                forced_nodes_str = ', '.join(forced_nodes)
+                msg = _('Host filter forcing available nodes to %s')
+            else:
+                forced_nodes_str = ', '.join(nodes_to_force)
+                msg = _("No nodes matched due to not matching "
+                        "'force_nodes' value of '%s'")
+            LOG.debug(msg % forced_nodes_str)
 
         filter_classes = self._choose_host_filters(filter_class_names)
         ignore_hosts = filter_properties.get('ignore_hosts', [])
         force_hosts = filter_properties.get('force_hosts', [])
-        if ignore_hosts or force_hosts:
-            name_to_cls_map = dict([(x.host, x) for x in hosts])
+        force_nodes = filter_properties.get('force_nodes', [])
+
+        if ignore_hosts or force_hosts or force_nodes:
+            # NOTE(deva): we can't assume "host" is unique because
+            #             one host may have many nodes.
+            name_to_cls_map = dict([((x.host, x.nodename), x) for x in hosts])
            if ignore_hosts:
                 _strip_ignore_hosts(name_to_cls_map, ignore_hosts)
                 if not name_to_cls_map:
                     return []
+            # NOTE(deva): allow force_hosts and force_nodes independently
             if force_hosts:
                 _match_forced_hosts(name_to_cls_map, force_hosts)
-            # NOTE(vish): Skip filters on forced hosts.
+            if force_nodes:
+                _match_forced_nodes(name_to_cls_map, force_nodes)
+            if force_hosts or force_nodes:
+                # NOTE(deva): Skip filters when forcing host or node
                 if name_to_cls_map:
                     return name_to_cls_map.values()
             hosts = name_to_cls_map.itervalues()
diff --git a/nova/tests/scheduler/test_host_manager.py b/nova/tests/scheduler/test_host_manager.py
index ddc9bd29a..c03c66c4e 100644
--- a/nova/tests/scheduler/test_host_manager.py
+++ b/nova/tests/scheduler/test_host_manager.py
@@ -45,6 +45,8 @@ class HostManagerTestCase(test.TestCase):
         self.host_manager = host_manager.HostManager()
         self.fake_hosts = [host_manager.HostState('fake_host%s' % x,
                 'fake-node') for x in xrange(1, 5)]
+        self.fake_hosts += [host_manager.HostState('fake_multihost',
+                'fake-node%s' % x) for x in xrange(1, 5)]
         self.addCleanup(timeutils.clear_time_override)
 
     def test_choose_host_filters_not_found(self):
@@ -115,7 +117,7 @@
 
     def test_get_filtered_hosts_with_ignore(self):
         fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3',
-                                            'fake_host5']}
+                                            'fake_host5', 'fake_multihost']}
 
         # [1] and [3] are host2 and host4
         info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
@@ -156,7 +158,7 @@
                                                       fake_properties)
         self._verify_result(info, result, False)
 
-    def test_get_filtered_hosts_with_ignore_and_force(self):
+    def test_get_filtered_hosts_with_ignore_and_force_hosts(self):
         # Ensure ignore_hosts processed before force_hosts in host filters.
         fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
                            'ignore_hosts': ['fake_host1']}
@@ -172,6 +174,97 @@
                                                       fake_properties)
         self._verify_result(info, result, False)
 
+    def test_get_filtered_hosts_with_force_host_and_many_nodes(self):
+        # Ensure all nodes returned for a host with many nodes
+        fake_properties = {'force_hosts': ['fake_multihost']}
+
+        info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
+                                  self.fake_hosts[6], self.fake_hosts[7]],
+                'expected_fprops': fake_properties}
+        self._mock_get_filtered_hosts(info)
+
+        self.mox.ReplayAll()
+
+        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+                                                      fake_properties)
+        self._verify_result(info, result, False)
+
+    def test_get_filtered_hosts_with_force_nodes(self):
+        fake_properties = {'force_nodes': ['fake-node2', 'fake-node4',
+                                           'fake-node9']}
+
+        # [5] is fake-node2, [7] is fake-node4
+        info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
+                'expected_fprops': fake_properties}
+        self._mock_get_filtered_hosts(info)
+
+        self.mox.ReplayAll()
+
+        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+                                                      fake_properties)
+        self._verify_result(info, result, False)
+
+    def test_get_filtered_hosts_with_force_hosts_and_nodes(self):
+        # Ensure only overlapping results if both force host and node
+        fake_properties = {'force_hosts': ['fake_host1', 'fake_multihost'],
+                           'force_nodes': ['fake-node2', 'fake-node9']}
+
+        # [5] is fake-node2
+        info = {'expected_objs': [self.fake_hosts[5]],
+                'expected_fprops': fake_properties}
+        self._mock_get_filtered_hosts(info)
+
+        self.mox.ReplayAll()
+
+        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+                                                      fake_properties)
+        self._verify_result(info, result, False)
+
+    def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self):
+        # Ensure non-overlapping force_node and force_host yield no result
+        fake_properties = {'force_hosts': ['fake_multihost'],
+                           'force_nodes': ['fake-node']}
+
+        info = {'expected_objs': [],
+                'expected_fprops': fake_properties}
+        self._mock_get_filtered_hosts(info)
+
+        self.mox.ReplayAll()
+
+        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+                                                      fake_properties)
+        self._verify_result(info, result, False)
+
+    def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self):
+        # Ensure ignore_hosts can coexist with force_nodes
+        fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'],
+                           'ignore_hosts': ['fake_host1', 'fake_host2']}
+
+        info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
+                'expected_fprops': fake_properties}
+        self._mock_get_filtered_hosts(info)
+
+        self.mox.ReplayAll()
+
+        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+                                                      fake_properties)
+        self._verify_result(info, result, False)
+
+    def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self):
+        # Ensure ignore_hosts is processed before force_nodes
+        fake_properties = {'force_nodes': ['fake_node4', 'fake_node2'],
+                           'ignore_hosts': ['fake_multihost']}
+
+        info = {'expected_objs': [],
+                'expected_fprops': fake_properties}
+        self._mock_get_filtered_hosts(info)
+
+        self.mox.ReplayAll()
+
+        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+                                                      fake_properties)
+        self._verify_result(info, result, False)
+
     def test_update_service_capabilities(self):
         service_states = self.host_manager.service_states
         self.assertEqual(len(service_states.keys()), 0)
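For reference, the forcing behaviour exercised by the new tests can be summarised without any Nova dependencies. This is a simplified sketch keyed on `(host, nodename)` pairs as in the patched `get_filtered_hosts`; `FakeHostState`, `apply_forcing`, and the sample host names are illustrative stand-ins, not Nova code, and the real method still runs the normal filter chain when nothing is forced:

```python
class FakeHostState(object):
    def __init__(self, host, nodename):
        self.host = host
        self.nodename = nodename


def apply_forcing(hosts, force_hosts=None, force_nodes=None):
    # Key on (host, nodename): one host may expose many nodes.
    name_to_cls_map = dict(((x.host, x.nodename), x) for x in hosts)
    if force_hosts:
        for (hostname, nodename) in list(name_to_cls_map):
            if hostname not in force_hosts:
                del name_to_cls_map[(hostname, nodename)]
    if force_nodes:
        for (hostname, nodename) in list(name_to_cls_map):
            if nodename not in force_nodes:
                del name_to_cls_map[(hostname, nodename)]
    # When a host or node was forced, filters are skipped and whatever
    # survived is returned directly.
    return list(name_to_cls_map.values())


hosts = [FakeHostState('fake_multihost', 'fake-node%d' % x)
         for x in range(1, 5)]
hosts.append(FakeHostState('fake_host1', 'fake-node'))

# Forcing a host keeps every node that host exposes.
print(len(apply_forcing(hosts, force_hosts=['fake_multihost'])))  # 4
# Forcing hosts and nodes together keeps only the overlap.
print(len(apply_forcing(hosts, force_hosts=['fake_multihost'],
                        force_nodes=['fake-node2'])))             # 1
```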
