From 01fed9311ab8a724283b3f456c12e573cb51d92b Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 10 Nov 2007 19:57:58 +0900 Subject: sh: Consolidate slab/kmalloc minalign values. Signed-off-by: Paul Mundt --- include/asm-sh/page.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'include/asm-sh/page.h') diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h index d00a8fde7c7..d0273dbce6b 100644 --- a/include/asm-sh/page.h +++ b/include/asm-sh/page.h @@ -157,8 +157,22 @@ typedef struct { unsigned long pgd; } pgd_t; * Slub defaults to 8-byte alignment, we're only interested in 4. * Slab defaults to BYTES_PER_WORD, which ends up being the same anyways. */ +#ifdef CONFIG_SUPERH32 #define ARCH_KMALLOC_MINALIGN 4 #define ARCH_SLAB_MINALIGN 4 +#else +/* If gcc inlines memset, it will use st.q instructions. Therefore, we need + kmalloc allocations to be 8-byte aligned. Without this, the alignment + becomes BYTES_PER_WORD i.e. only 4 (since sizeof(long)==sizeof(void*)==4 on + sh64 at the moment). */ +#define ARCH_KMALLOC_MINALIGN 8 + +/* + * We want 8-byte alignment for the slab caches as well, otherwise we have + * the same BYTES_PER_WORD (sizeof(void *)) min align in kmem_cache_create(). + */ +#define ARCH_SLAB_MINALIGN 8 +#endif #endif /* __KERNEL__ */ #endif /* __ASM_SH_PAGE_H */ -- cgit