Diffstat (limited to '0002-mm-Abort-reclaim-compaction-if-compaction-can-procee.patch')
-rw-r--r--  0002-mm-Abort-reclaim-compaction-if-compaction-can-procee.patch  81
1 file changed, 81 insertions(+), 0 deletions(-)
diff --git a/0002-mm-Abort-reclaim-compaction-if-compaction-can-procee.patch b/0002-mm-Abort-reclaim-compaction-if-compaction-can-procee.patch
new file mode 100644
index 000000000..e74b64d91
--- /dev/null
+++ b/0002-mm-Abort-reclaim-compaction-if-compaction-can-procee.patch
@@ -0,0 +1,81 @@
+From c01043c9aa51a63bd01c60e53494ca4a7e994542 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Fri, 7 Oct 2011 16:17:23 +0100
+Subject: [PATCH 2/2] mm: Abort reclaim/compaction if compaction can proceed
+
+If compaction can proceed, shrink_zones() stops doing any work but
+the callers still call shrink_slab(), raise the priority and
+potentially sleep. This patch aborts direct reclaim/compaction
+entirely if compaction can proceed.
+
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ mm/vmscan.c | 20 ++++++++++++++++----
+ 1 files changed, 16 insertions(+), 4 deletions(-)
+
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 8c03534..b295a38 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2000,14 +2000,19 @@ restart:
+ *
+ * If a zone is deemed to be full of pinned pages then just give it a light
+ * scan then give up on it.
++ *
++ * This function returns true if a zone is being reclaimed for a costly
++ * high-order allocation and compaction is either ready to begin or deferred.
++ * This indicates to the caller that it should retry the allocation or fail.
+ */
+-static void shrink_zones(int priority, struct zonelist *zonelist,
++static bool shrink_zones(int priority, struct zonelist *zonelist,
+ struct scan_control *sc)
+ {
+ struct zoneref *z;
+ struct zone *zone;
+ unsigned long nr_soft_reclaimed;
+ unsigned long nr_soft_scanned;
++ bool should_abort_reclaim = false;
+
+ for_each_zone_zonelist_nodemask(zone, z, zonelist,
+ gfp_zone(sc->gfp_mask), sc->nodemask) {
+@@ -2025,12 +2030,15 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
+ if (COMPACTION_BUILD) {
+ /*
+ * If we already have plenty of memory free
+- * for compaction, don't free any more.
++ * for compaction in this zone, don't free any
++ * more.
+ */
+ if (sc->order > PAGE_ALLOC_COSTLY_ORDER &&
+ (compaction_suitable(zone, sc->order) ||
+- compaction_deferred(zone)))
++ compaction_deferred(zone))) {
++ should_abort_reclaim = true;
+ continue;
++ }
+ }
+ /*
+ * This steals pages from memory cgroups over softlimit
+@@ -2049,6 +2057,8 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
+
+ shrink_zone(priority, zone, sc);
+ }
++
++ return should_abort_reclaim;
+ }
+
+ static bool zone_reclaimable(struct zone *zone)
+@@ -2113,7 +2123,9 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
+ sc->nr_scanned = 0;
+ if (!priority)
+ disable_swap_token(sc->mem_cgroup);
+- shrink_zones(priority, zonelist, sc);
++ if (shrink_zones(priority, zonelist, sc))
++ break;
++
+ /*
+ * Don't shrink slabs when reclaiming memory from
+ * over limit cgroups
+--
+1.7.6.4
+
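To make the control-flow change easier to see in isolation, here is a minimal C sketch of the pattern the patch introduces. It is a toy model, not kernel code: the shrink_zones()/do_try_to_free_pages() stand-ins, the simulated page counts and the 300-page threshold are all invented for illustration; only the names and the early-exit structure mirror the patch. What it demonstrates is that the caller's priority loop now aborts as soon as shrink_zones() reports that compaction can proceed, instead of continuing to shrink slabs, raise the priority and sleep.

/* toy_reclaim.c: a toy model of the abort-early pattern above.
 * None of this is kernel code; the names only mirror the patch. */
#include <stdbool.h>
#include <stdio.h>

#define DEF_PRIORITY 12		/* default scan priority, as in the kernel */

/* Stand-in for shrink_zones(): reclaim a little "memory" per call and
 * report true once enough is free for compaction to proceed. */
static bool shrink_zones(int priority, int *nr_free)
{
	(void)priority;		/* unused in this toy model */
	*nr_free += 100;	/* pretend we reclaimed 100 pages */
	return *nr_free >= 300;	/* "compaction can proceed" */
}

/* Stand-in for do_try_to_free_pages(): before the patch the loop ran
 * slab shrinking and throttling at every priority even when compaction
 * was already possible; with the patch it breaks out immediately. */
static void do_try_to_free_pages(void)
{
	int nr_free = 0;
	int priority;

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		printf("priority %d: reclaiming\n", priority);
		if (shrink_zones(priority, &nr_free)) {
			printf("compaction ready: aborting reclaim\n");
			break;	/* the behaviour this patch adds */
		}
		/* shrink_slab() and writeback throttling would run here */
	}
}

int main(void)
{
	do_try_to_free_pages();
	return 0;
}

Compiled and run with a plain "cc toy_reclaim.c", this prints a few scan passes and then the abort message; in the real patch the early break from the priority loop likewise hands control back to the allocator, which can then attempt compaction instead of reclaiming pages it no longer needs.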