author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2008-12-18 19:13:38 +0000 |
---|---|---|
committer | Paul Mackerras <paulus@samba.org> | 2008-12-21 14:21:16 +1100 |
commit | f048aace29e007f2b642097e2da8231e0e9cce2d (patch) | |
tree | 5e99b1d1d37817703132e97388994386a7bee8da /arch/powerpc/include/asm/mmu.h | |
parent | 7c03d653cd257793dc40520c94e229b5fd0578e7 (diff) | |
powerpc/mm: Add SMP support to no-hash TLB handling
This commit moves the whole no-hash TLB handling out of line into a
new tlb_nohash.c file, and implements some basic SMP support using
IPIs and/or broadcast tlbivax instructions.
Note that I'm using local invalidations for D->I cache coherency.
At worst, if another processor is trying to execute the same page and
has the old entry in its TLB, it will just take a fault and re-do
the TLB flush locally (it won't re-do the cache flush in any case).
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
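As a rough illustration of the IPI-vs-broadcast choice described above, here is a minimal standalone C sketch. It is not the kernel's tlb_nohash.c: mmu_has_feature() here is a local stub, and tlb_invalidate_local/bcast/ipi are hypothetical placeholders for the real invalidation primitives and IPI path; only the three MMU_FTR_* bit values come from the patch below.

```c
#include <stdbool.h>
#include <stdio.h>

/* Feature bits mirrored from the asm/mmu.h hunk in this commit. */
#define MMU_FTR_USE_TLBIVAX_BCAST  0x00040000u
#define MMU_FTR_USE_TLBILX_PID     0x00080000u
#define MMU_FTR_LOCK_BCAST_INVAL   0x00100000u

/* Illustrative stand-in for the kernel's MMU feature test. */
static unsigned int mmu_features = MMU_FTR_USE_TLBIVAX_BCAST |
                                   MMU_FTR_LOCK_BCAST_INVAL;

static bool mmu_has_feature(unsigned int ftr)
{
	return (mmu_features & ftr) != 0;
}

/* Stubs standing in for the real invalidation primitives and IPI path. */
static void tlb_invalidate_local(unsigned long va, unsigned int pid)
{
	printf("local invalidate: va=%#lx pid=%u\n", va, pid);
}

static void tlb_invalidate_bcast(unsigned long va, unsigned int pid)
{
	printf("broadcast tlbivax: va=%#lx pid=%u\n", va, pid);
}

static void tlb_invalidate_ipi(unsigned long va, unsigned int pid)
{
	printf("IPI other CPUs, each does a local invalidate: va=%#lx pid=%u\n",
	       va, pid);
}

/*
 * Sketch of the SMP flush decision the commit message describes:
 * prefer a hardware-broadcast tlbivax when the feature bit allows it
 * (serialized if the core cannot handle concurrent broadcasts),
 * otherwise fall back to IPIs; the local TLB entry is dropped too.
 */
static void flush_tlb_page_sketch(unsigned long va, unsigned int pid,
				  bool other_cpus_use_mm)
{
	if (other_cpus_use_mm) {
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			bool need_lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);

			if (need_lock)
				printf("take broadcast lock\n");  /* spinlock in the kernel */
			tlb_invalidate_bcast(va, pid);
			if (need_lock)
				printf("release broadcast lock\n");
			return;	/* the broadcast reaches the issuing CPU as well */
		}
		tlb_invalidate_ipi(va, pid);
	}
	tlb_invalidate_local(va, pid);
}

int main(void)
{
	flush_tlb_page_sketch(0x10002000ul, 7, true);
	return 0;
}
```

The early return in the broadcast branch reflects the point that a broadcast tlbivax already covers the issuing CPU, so that path needs no separate local flush; whether a given flush takes the broadcast, IPI, or purely local path is decided by the feature bits added in the hunk below.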
Diffstat (limited to 'arch/powerpc/include/asm/mmu.h')
-rw-r--r-- | arch/powerpc/include/asm/mmu.h | 16 |
1 file changed, 16 insertions, 0 deletions
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index dc8c0aef5e6..6e763991131 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -30,6 +30,22 @@
  */
 #define MMU_FTR_BIG_PHYS		ASM_CONST(0x00020000)
 
+/* Enable use of broadcast TLB invalidations. We don't always set it
+ * on processors that support it due to other constraints with the
+ * use of such invalidations
+ */
+#define MMU_FTR_USE_TLBIVAX_BCAST	ASM_CONST(0x00040000)
+
+/* Enable use of tlbilx invalidate-by-PID variant.
+ */
+#define MMU_FTR_USE_TLBILX_PID		ASM_CONST(0x00080000)
+
+/* This indicates that the processor cannot handle multiple outstanding
+ * broadcast tlbivax or tlbsync. This makes the code use a spinlock
+ * around such invalidate forms.
+ */
+#define MMU_FTR_LOCK_BCAST_INVAL	ASM_CONST(0x00100000)
+
 #ifndef __ASSEMBLY__
 #include <asm/cputable.h>
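The MMU_FTR_USE_TLBILX_PID bit gates a different strategy: invalidating every entry tagged with a PID instead of flushing the whole TLB. As a hedged example of how such a bit might be consulted when tearing down an address space, here is a standalone sketch; tlb_invalidate_by_pid, tlb_invalidate_all and the mmu_has_feature() stand-in are illustrative stubs, not the kernel's implementation.

```c
#include <stdbool.h>
#include <stdio.h>

/* Feature bit mirrored from the hunk above. */
#define MMU_FTR_USE_TLBILX_PID  0x00080000u

/* Illustrative stand-in for the kernel's MMU feature test. */
static unsigned int mmu_features = MMU_FTR_USE_TLBILX_PID;

static bool mmu_has_feature(unsigned int ftr)
{
	return (mmu_features & ftr) != 0;
}

/* Stubs for the two invalidation strategies. */
static void tlb_invalidate_by_pid(unsigned int pid)
{
	printf("tlbilx-style: drop every TLB entry tagged with PID %u\n", pid);
}

static void tlb_invalidate_all(void)
{
	printf("fallback: flush the entire TLB\n");
}

/*
 * Sketch: flushing a whole address space can target only its PID when
 * the core advertises an invalidate-by-PID instruction, rather than
 * discarding every other context's translations as well.
 */
static void flush_tlb_mm_sketch(unsigned int pid)
{
	if (mmu_has_feature(MMU_FTR_USE_TLBILX_PID))
		tlb_invalidate_by_pid(pid);
	else
		tlb_invalidate_all();
}

int main(void)
{
	flush_tlb_mm_sketch(42);
	return 0;
}
```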