diff options
| author | Michael Still <mikal@stillhq.com> | 2012-11-14 18:37:04 +1100 |
|---|---|---|
| committer | Michael Still <mikal@stillhq.com> | 2012-11-21 09:30:42 +1100 |
| commit | c2de33a0a2132774dc295861cef138ec24bb0cf9 (patch) | |
| tree | 127ebb4e9ff9b64202374dbdc707e55f7038e74c /nova/compute | |
| parent | e984c20f1611d3aa5e7b4607a2232ee25112ab47 (diff) | |
Detect shared storage; handle base cleanup better.
If base image storage is shared, we need to care about remote
instances when we clean up. This patch "learns" which storage is
shared, and then decides what base images are in use anywhere
on the set of compute nodes which share that base storage.
This is complicated because shared instance storage doesn't have
to be per-cluster. It could for example be per rack. We need to
handle that properly.
This should resolve bug 1078594.
Change-Id: I36d0d6e965b114bb68c8f7b7fd43f8e96b2dd8f5
Diffstat (limited to 'nova/compute')
| -rw-r--r-- | nova/compute/manager.py | 18 |
1 file changed, 17 insertions(+), 1 deletion(-)
```diff
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 7484a9252..acadd6f61 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -73,6 +73,7 @@ from nova import quota
 from nova.scheduler import rpcapi as scheduler_rpcapi
 from nova import utils
 from nova.virt import driver
+from nova.virt import storage_users
 from nova.virt import virtapi
 from nova import volume
@@ -3247,4 +3248,19 @@ class ComputeManager(manager.SchedulerDependentManager):
             return
 
         all_instances = self.db.instance_get_all(context)
-        self.driver.manage_image_cache(context, all_instances)
+
+        # Determine what other nodes use this storage
+        storage_users.register_storage_use(CONF.instances_path, CONF.host)
+        nodes = storage_users.get_storage_users(CONF.instances_path)
+
+        # Filter all_instances to only include those nodes which share this
+        # storage path.
+        # TODO(mikal): this should be further refactored so that the cache
+        # cleanup code doesn't know what those instances are, just a remote
+        # count, and then this logic should be pushed up the stack.
+        filtered_instances = []
+        for instance in all_instances:
+            if instance['host'] in nodes:
+                filtered_instances.append(instance)
+
+        self.driver.manage_image_cache(context, filtered_instances)
```
