| | | |
|---|---|---|
| author | Russell Bryant <rbryant@redhat.com> | 2013-06-20 12:56:51 -0400 |
| committer | Russell Bryant <rbryant@redhat.com> | 2013-06-20 12:57:45 -0400 |
| commit | 3c67e4cbe419d666f75fada777c71ddd0cd05a29 (patch) | |
| tree | 16eb22bfa51a6ee19448b4bb964cf624d087be04 | |
| parent | 3ab42d430d496ecf0c12d37c05f9923faf33114f (diff) | |
Revert "Initial scheduler support for instance_groups"
This reverts commit b2c17ae891b12118b9d11bf0ecc44e77956715c8.
While reviewing merged changes, I came across this one. It's using the
conductor in a way that it shouldn't. The scheduler has db access, so
there's no need to add the extra conductor overhead. The best thing to
do here is to just revert for now, IMO.
Change-Id: Iaa0d67df7bb84913784d925cffebcd5f252249ea
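For context on the "extra conductor overhead" point: the reverted code made the scheduler fetch group membership through the conductor service, while the revert restores a direct database query (the `group_hosts()` method re-added in nova/scheduler/driver.py below). A rough sketch of the two call paths, using the names visible in the diff; the `conductor_api` and `db` handles are taken as parameters purely for illustration, so this is not runnable code from the tree:

```python
# Illustrative sketch only -- the handles are passed in rather than imported,
# since this is not meant to run outside the nova tree.

def group_hosts_via_conductor(conductor_api, context, group_uuid):
    """Reverted path: scheduler -> conductor RPC -> database.

    An extra round trip for a service that already has direct db access.
    """
    group_data = conductor_api.instance_group_get_all(context, group_uuid)
    return [inst['host'] for inst in group_data['instances']
            if inst.get('host') is not None]

def group_hosts_direct(db, context, group):
    """Restored path: the scheduler queries the database itself,
    as in Scheduler.group_hosts() re-added by this revert."""
    members = db.instance_get_all_by_filters(context,
                                             {'deleted': False, 'group': group})
    return [member['host'] for member in members
            if member.get('host') is not None]
```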
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | nova/conductor/api.py | 15 |
| -rw-r--r-- | nova/conductor/manager.py | 35 |
| -rw-r--r-- | nova/conductor/rpcapi.py | 18 |
| -rw-r--r-- | nova/scheduler/driver.py | 26 |
| -rw-r--r-- | nova/scheduler/filter_scheduler.py | 26 |
| -rw-r--r-- | nova/tests/conductor/test_conductor.py | 50 |
| -rw-r--r-- | nova/tests/scheduler/test_filter_scheduler.py | 13 |
7 files changed, 25 insertions, 158 deletions
```diff
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index c681dfb08..3f955d24f 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -109,21 +109,6 @@ class LocalAPI(object):
     def instance_fault_create(self, context, values):
         return self._manager.instance_fault_create(context, values)
 
-    def instance_group_members_add(self, context, group_uuid, members,
-                                   set_delete=False):
-        return self._manager.instance_group_members_add(context,
-                                                        group_uuid,
-                                                        members,
-                                                        set_delete=set_delete)
-
-    def instance_group_member_delete(self, context, group_uuid, instance_id):
-        return self._manager.instance_group_member_delete(context,
-                                                          group_uuid,
-                                                          instance_id)
-
-    def instance_group_get_all(self, context, group_uuid):
-        return self._manager.instance_group_get_all(context, group_uuid)
-
     def migration_get(self, context, migration_id):
         return self._manager.migration_get(context, migration_id)
 
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 053c44323..6693e7fee 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -67,7 +67,7 @@ class ConductorManager(manager.Manager):
     namespace. See the ComputeTaskManager class for details.
     """
 
-    RPC_API_VERSION = '1.52'
+    RPC_API_VERSION = '1.51'
 
     def __init__(self, *args, **kwargs):
         super(ConductorManager, self).__init__(service_name='conductor',
@@ -319,13 +319,6 @@ class ConductorManager(manager.Manager):
         self.db.instance_destroy(context, instance['uuid'])
 
     def instance_info_cache_delete(self, context, instance):
-        # Delete the instance from the instance_group member data
-        system_meta = self.db.instance_system_metadata_get(context,
-                                                           instance['uuid'])
-        instance_group = system_meta.get('instance_group', None)
-        if instance_group:
-            self.db.instance_group_member_delete(context, instance_group,
-                                                 instance['uuid'])
         self.db.instance_info_cache_delete(context, instance['uuid'])
 
     def instance_info_cache_update(self, context, instance, values):
@@ -340,32 +333,6 @@ class ConductorManager(manager.Manager):
         result = self.db.instance_fault_create(context, values)
         return jsonutils.to_primitive(result)
 
-    def instance_group_members_add(self, context, group_uuid, members,
-                                   set_delete=False):
-        result = self.db.instance_group_members_add(context, group_uuid,
-                members, set_delete=set_delete)
-        return jsonutils.to_primitive(result)
-
-    def instance_group_member_delete(self, context, group_uuid, instance_id):
-        result = self.db.instance_group_member_delete(context,
-                                                      group_uuid, instance_id)
-        return jsonutils.to_primitive(result)
-
-    def instance_group_get_all(self, context, group_uuid):
-        instance_group = self.db.instance_group_get(context, group_uuid)
-        instances = []
-        for member in instance_group['members']:
-            try:
-                instance = self.db.instance_get_by_uuid(context, member)
-                if instance:
-                    instances.append(instance)
-            except exception.InstanceNotFound:
-                LOG.error(_("Invalid instance %(member)s in instance "
-                            " group %(group)s"),
-                          {'member': member, 'group': group_uuid})
-        return jsonutils.to_primitive({'instance_group': instance_group,
-                                       'instances': instances})
-
     # NOTE(kerrin): This method can be removed in v2.0 of the RPC API.
     def vol_get_usage_by_time(self, context, start_time):
         result = self.db.vol_get_usage_by_time(context, start_time)
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index 4e78e29bb..bb66ca8b2 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -102,8 +102,6 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
     1.50 - Added object_action() and object_class_action()
     1.51 - Added the 'legacy' argument to
            block_device_mapping_get_all_by_instance
-    1.52 - Added instance_group_members_add, instance_group_member_delete and
-           instance_group_get_all
     """
 
     BASE_RPC_API_VERSION = '1.0'
@@ -335,22 +333,6 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
         msg = self.make_msg('instance_fault_create', values=values)
         return self.call(context, msg, version='1.36')
 
-    def instance_group_members_add(self, context, group_uuid, members,
-                                   set_delete=False):
-        msg = self.make_msg('instance_group_members_add',
-                            group_uuid=group_uuid, members=members,
-                            set_delete=set_delete)
-        return self.call(context, msg, version='1.52')
-
-    def instance_group_member_delete(self, context, group_uuid, instance_id):
-        msg = self.make_msg('instance_group_member_delete',
-                            group_uuid=group_uuid, instance_id=instance_id)
-        return self.call(context, msg, version='1.52')
-
-    def instance_group_get_all(self, context, group_uuid):
-        msg = self.make_msg('instance_group_get_all', group_uuid=group_uuid)
-        return self.call(context, msg, version='1.52')
-
     def action_event_start(self, context, values):
         values_p = jsonutils.to_primitive(values)
         msg = self.make_msg('action_event_start', values=values_p)
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 6a429f526..d5a1eedea 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -94,14 +94,6 @@ def instance_update_db(context, instance_uuid, extra_values=None):
     values = {'host': None, 'node': None, 'scheduled_at': now}
     if extra_values:
         values.update(extra_values)
-        # Get the instance_group if it exists. This will be written to the
-        # system_metadata as it will be used when the VM is deleted
-        system_metadata = extra_values.get('system_metadata', None)
-        if system_metadata:
-            instance_group = system_metadata.get('instance_group', None)
-            if instance_group:
-                conductor_api.instance_group_members_add(context,
-                        instance_group, [instance_uuid])
     return db.instance_update(context, instance_uuid, values)
 
 
@@ -147,17 +139,15 @@ class Scheduler(object):
                 for service in services
                 if self.servicegroup_api.service_is_up(service)]
 
-    def instance_group_data(self, context, group_uuid):
-        """Return the the group data for the instance group."""
-
-        return conductor_api.instance_group_get_all(context, group_uuid)
-
-    def instance_group_hosts(self, context, instance_group):
-        """Return the list of hosts that have VM's from the instance_group."""
-
-        instances = instance_group['instances']
-        return [instance['host'] for instance in instances
-                if instance.get('host') is not None]
+    def group_hosts(self, context, group):
+        """Return the list of hosts that have VM's from the group."""
+
+        # The system_metadata 'group' will be filtered
+        members = db.instance_get_all_by_filters(context,
+                {'deleted': False, 'group': group})
+        return [member['host']
+                for member in members
+                if member.get('host') is not None]
 
     def schedule_prep_resize(self, context, image, request_spec,
                              filter_properties, instance, instance_type,
diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py
index a75bf286f..08cb6a20e 100644
--- a/nova/scheduler/filter_scheduler.py
+++ b/nova/scheduler/filter_scheduler.py
@@ -173,11 +173,11 @@ class FilterScheduler(driver.Scheduler):
 
         # Update the metadata if necessary
         scheduler_hints = filter_properties.get('scheduler_hints') or {}
-        group_uuid = scheduler_hints.get('instance_group', None)
+        group = scheduler_hints.get('group', None)
         values = None
-        if group_uuid:
+        if group:
             values = request_spec['instance_properties']['system_metadata']
-            values.update({'instance_group': group_uuid})
+            values.update({'group': group})
             values = {'system_metadata': values}
 
         updated_instance = driver.instance_update_db(context,
@@ -300,19 +300,17 @@ class FilterScheduler(driver.Scheduler):
         instance_properties = request_spec['instance_properties']
         instance_type = request_spec.get("instance_type", None)
 
-        # Get the instance_group
+        # Get the group
         update_group_hosts = False
         scheduler_hints = filter_properties.get('scheduler_hints') or {}
-        group_uuid = scheduler_hints.get('instance_group', None)
-        if group_uuid:
-            group_data = self.instance_group_data(elevated, group_uuid)
-            if 'anti-affinity' in group_data['instance_group']['policies']:
-                update_group_hosts = True
-                group_hosts = self.instance_group_hosts(elevated, group_data)
-                if 'group_hosts' not in filter_properties:
-                    filter_properties.update({'group_hosts': []})
-                user_hosts = filter_properties['group_hosts']
-                filter_properties['group_hosts'] = user_hosts + group_hosts
+        group = scheduler_hints.get('group', None)
+        if group:
+            group_hosts = self.group_hosts(elevated, group)
+            update_group_hosts = True
+            if 'group_hosts' not in filter_properties:
+                filter_properties.update({'group_hosts': []})
+            configured_hosts = filter_properties['group_hosts']
+            filter_properties['group_hosts'] = configured_hosts + group_hosts
 
         config_options = self._get_configuration_options()
 
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index b86d3320a..7a33cfbb9 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -340,25 +340,7 @@ class _BaseTestCase(object):
         self.conductor.instance_destroy(self.context, {'uuid': 'fake-uuid'})
 
     def test_instance_info_cache_delete(self):
-        self.mox.StubOutWithMock(db, 'instance_system_metadata_get')
         self.mox.StubOutWithMock(db, 'instance_info_cache_delete')
-        fake_data = {}
-        db.instance_system_metadata_get(self.context,
-                                        'fake-uuid').AndReturn(fake_data)
-        db.instance_info_cache_delete(self.context, 'fake-uuid')
-        self.mox.ReplayAll()
-        self.conductor.instance_info_cache_delete(self.context,
-                                                  {'uuid': 'fake-uuid'})
-
-    def test_instance_info_cache_delete_with_instance_group(self):
-        self.mox.StubOutWithMock(db, 'instance_system_metadata_get')
-        self.mox.StubOutWithMock(db, 'instance_group_member_delete')
-        self.mox.StubOutWithMock(db, 'instance_info_cache_delete')
-        fake_data = {'instance_group': 'fake-group'}
-        db.instance_system_metadata_get(self.context,
-                                        'fake-uuid').AndReturn(fake_data)
-        db.instance_group_member_delete(self.context, 'fake-group',
-                                        'fake-uuid')
         db.instance_info_cache_delete(self.context, 'fake-uuid')
         self.mox.ReplayAll()
         self.conductor.instance_info_cache_delete(self.context,
@@ -456,38 +438,6 @@ class _BaseTestCase(object):
                                                      'fake-values')
         self.assertEqual(result, 'fake-result')
 
-    def test_instance_group_members_add(self):
-        self.mox.StubOutWithMock(db, 'instance_group_members_add')
-        db.instance_group_members_add(self.context, 'fake-uuid',
-                'fake-members', set_delete=False).AndReturn('fake-members')
-        self.mox.ReplayAll()
-        result = self.conductor.instance_group_members_add(self.context,
-                'fake-uuid', 'fake-members', set_delete=False)
-        self.assertEqual(result, 'fake-members')
-
-    def test_instance_group_member_delete(self):
-        self.mox.StubOutWithMock(db, 'instance_group_member_delete')
-        db.instance_group_member_delete(self.context, 'fake-uuid',
-                'fake-instance-id').AndReturn('fake-result')
-        self.mox.ReplayAll()
-        result = self.conductor.instance_group_member_delete(self.context,
-                'fake-uuid', 'fake-instance-id')
-        self.assertEqual(result, 'fake-result')
-
-    def test_instance_group_get_all(self):
-        self.mox.StubOutWithMock(db, 'instance_group_get')
-        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
-        group = {'uuid': 'fake-uuid',
-                 'members': ['fake-instance-id']}
-        db.instance_group_get(self.context, 'fake-uuid').AndReturn(group)
-        db.instance_get_by_uuid(self.context,
-                'fake-instance-id').AndReturn('fake-instance')
-        self.mox.ReplayAll()
-        result = self.conductor.instance_group_get_all(self.context,
-                                                       'fake-uuid')
-        self.assertEqual(result, {'instance_group': group,
-                                  'instances': ['fake-instance']})
-
     def test_task_log_get(self):
         self.mox.StubOutWithMock(db, 'task_log_get')
         db.task_log_get(self.context, 'task', 'begin', 'end', 'host',
diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py
index a02889df9..d6cc7808e 100644
--- a/nova/tests/scheduler/test_filter_scheduler.py
+++ b/nova/tests/scheduler/test_filter_scheduler.py
@@ -532,7 +532,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
 
     def test_basic_schedule_run_instances_anti_affinity(self):
         filter_properties = {'scheduler_hints':
-                                {'instance_group': 'fake_uuid'}}
+                                {'group': 'cats'}}
         # Request spec 1
         instance_opts1 = {'project_id': 1, 'os_type': 'Linux',
                           'memory_mb': 512, 'root_gb': 512,
@@ -562,24 +562,19 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         self.mox.StubOutWithMock(driver, 'instance_update_db')
         self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')
-        self.mox.StubOutWithMock(sched, 'instance_group_data')
+        self.mox.StubOutWithMock(sched, 'group_hosts')
 
         instance1_1 = {'uuid': 'fake-uuid1-1'}
         instance1_2 = {'uuid': 'fake-uuid1-2'}
-        group_data = {'instance_group': {'uuid': 'fake_uuid',
-                                         'policies': ['anti-affinity']},
-                      'instances': []}
-        sched.instance_group_data(mox.IgnoreArg(),
-                'fake_uuid').AndReturn(group_data)
+        sched.group_hosts(mox.IgnoreArg(), 'cats').AndReturn([])
 
         def inc_launch_index1(*args, **kwargs):
             request_spec1['instance_properties']['launch_index'] = (
                 request_spec1['instance_properties']['launch_index'] + 1)
 
         expected_metadata = {'system_metadata':
-                                 {'system': 'metadata',
-                                  'instance_group': 'fake_uuid'}}
+                                 {'system': 'metadata', 'group': 'cats'}}
         driver.instance_update_db(fake_context, instance1_1['uuid'],
                 extra_values=expected_metadata).WithSideEffects(
                         inc_launch_index1).AndReturn(instance1_1)
```
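Downstream of this change (not shown in the diff), an anti-affinity host filter consumes the `group_hosts` list that `_schedule()` builds above and rejects any candidate host that already runs a member of the group. A minimal illustrative sketch of such a consumer; the class name and `host_state.host` attribute are assumptions for illustration, not code from this commit:

```python
class GroupAntiAffinitySketch(object):
    """Illustrative only: shows how a scheduler host filter could use the
    'group_hosts' list populated by FilterScheduler._schedule() above."""

    def host_passes(self, host_state, filter_properties):
        # Reject hosts that already run an instance from the requested group.
        group_hosts = filter_properties.get('group_hosts') or []
        return host_state.host not in group_hosts
```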
