summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-12-09 10:14:36 -0800
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-12-09 10:17:52 -0800
commit7fd272550bd43cc1d7289ef0ab2fa50de137e767 (patch)
tree85c2ae7f4a7f5df3cc2cbd27056ff052ae0de5e2
parent94545baded0bfbabdc30a3a4cb48b3db479dd6ef (diff)
downloadkernel-crypto-7fd272550bd43cc1d7289ef0ab2fa50de137e767.tar.gz
kernel-crypto-7fd272550bd43cc1d7289ef0ab2fa50de137e767.tar.xz
kernel-crypto-7fd272550bd43cc1d7289ef0ab2fa50de137e767.zip
Avoid double memclear() in SLOB/SLUB
Both slob and slub react to __GFP_ZERO by clearing the allocation, which means that passing the GFP_ZERO bit down to the page allocator is just wasteful and pointless.

Acked-by: Matt Mackall <mpm@selenic.com>
Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--mm/slob.c2
-rw-r--r--mm/slub.c3
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/mm/slob.c b/mm/slob.c
index ee2ef8af0d4..773a7aa80ab 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -330,7 +330,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
/* Not enough space: must allocate a new page */
if (!b) {
- b = slob_new_page(gfp, 0, node);
+ b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
if (!b)
return 0;
sp = (struct slob_page *)virt_to_page(b);
diff --git a/mm/slub.c b/mm/slub.c
index b9f37cb0f2e..9c1d9f3b364 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1468,6 +1468,9 @@ static void *__slab_alloc(struct kmem_cache *s,
void **object;
struct page *new;
+ /* We handle __GFP_ZERO in the caller */
+ gfpflags &= ~__GFP_ZERO;
+
if (!c->page)
goto new_slab;