author     Christoph Lameter <clameter@sgi.com>          2007-05-09 02:32:38 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-09 12:30:45 -0700
commit     1f99a283dc13b167b93b2e453a30782955f165c2
tree       85960206f9b5757680b768d6900c60928f1c2117
parent     abcd08a6f564171ffa05bc77d1c2ba4cfa949653
SLUB: clean up krealloc
We really do not need all this gaga there.

ksize gives us all the information we need to figure out if the object can
cope with the new size.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
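The point of the message is that ksize() reports the usable size of the object
backing p, so krealloc() only has to compare that against new_size. Below is a
minimal caller-side sketch of the behaviour this relies on; it is not part of
the patch. The function name and the size 100 are made up for illustration,
the actual rounding depends on the configured kmalloc caches, and error
handling of krealloc() failures is elided.

#include <linux/slab.h>

/* Illustration only: the caller-visible effect of the ksize()-based check. */
static void krealloc_demo(void)
{
	void *buf = kmalloc(100, GFP_KERNEL);	/* rounded up to a kmalloc cache size */
	size_t ks;

	if (!buf)
		return;

	ks = ksize(buf);			/* usable size of the underlying object */

	/* New size still fits the existing object: same pointer, no copy. */
	buf = krealloc(buf, ks, GFP_KERNEL);

	/* New size exceeds it: a new object is allocated and the data copied over. */
	buf = krealloc(buf, ks + 1, GFP_KERNEL);

	kfree(buf);
}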
 mm/slub.c | 15 ++++-----------
 1 file changed, 4 insertions(+), 11 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 1832ae1ea53..5d425d7116e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2199,9 +2199,8 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  */
 void *krealloc(const void *p, size_t new_size, gfp_t flags)
 {
-	struct kmem_cache *new_cache;
 	void *ret;
-	struct page *page;
+	size_t ks;
 
 	if (unlikely(!p))
 		return kmalloc(new_size, flags);
@@ -2211,19 +2210,13 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 		return NULL;
 	}
 
-	page = virt_to_head_page(p);
-
-	new_cache = get_slab(new_size, flags);
-
-	/*
-	 * If new size fits in the current cache, bail out.
-	 */
-	if (likely(page->slab == new_cache))
+	ks = ksize(p);
+	if (ks >= new_size)
 		return (void *)p;
 
 	ret = kmalloc(new_size, flags);
 	if (ret) {
-		memcpy(ret, p, min(new_size, ksize(p)));
+		memcpy(ret, p, min(new_size, ks));
 		kfree(p);
 	}
 	return ret;
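
For readability, this is how the function reads with both hunks applied,
reconstructed from the diff above:

void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;
	size_t ks;

	if (unlikely(!p))
		return kmalloc(new_size, flags);

	if (unlikely(!new_size)) {
		kfree(p);
		return NULL;
	}

	ks = ksize(p);
	if (ks >= new_size)
		return (void *)p;

	ret = kmalloc(new_size, flags);
	if (ret) {
		memcpy(ret, p, min(new_size, ks));
		kfree(p);
	}
	return ret;
}

One visible effect of comparing against ksize() rather than against the cache
returned by get_slab(): a shrinking krealloc() now keeps the existing object,
since any new_size up to the object's usable size is accepted in place.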