| author | Nicolas Pitre <nico@cam.org> | 2005-12-19 22:20:51 +0000 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2005-12-19 22:20:51 +0000 |
| commit | 7c612bfd4ed3064fd48a4877a114c8186547367b | |
| tree | bd307ea5bf9c0ad16ad1a590490d9095107d35fc /arch | |
| parent | 567bd98017d9c9f2ac1c148ddc78c062e8abd398 | |
[ARM] 3210/1: add missing memory barrier helper for NPTL support
Patch from Nicolas Pitre
Strictly speaking, the NPTL kernel helpers are required only for pre-ARMv6
targets. They are also available on ARMv6+ for obvious compatibility
reasons. However, there are cases where extra memory barriers are needed
on an SMP ARMv6 machine but not on pre-ARMv6.
This patch adds a memory barrier kernel helper that glibc can use as
needed for pre-ARMv6 binaries to be forward compatible with an SMP
kernel on ARMv6, as well as the necessary dmb instructions to the
cmpxchg helper.
Signed-off-by: Nicolas Pitre <nico@cam.org>
Acked-by: Daniel Jacobowitz <dan@codesourcery.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
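The patch documents the intended user space binding for the new helper: a call through the fixed address 0xffff0fa0. As a minimal sketch of how a pre-ARMv6 binary might use it to publish data safely to a reader on another CPU (the shared variables and the publish pattern are illustrative, not part of the patch):

```c
/* Entry point of the new kuser helper, as defined in the diff below. */
typedef void (__kernel_dmb_t)(void);
#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)

/* Illustrative shared state: 'ready' tells a reader that 'payload' is valid. */
static int payload;
static volatile int ready;

static void publish(int value)
{
	payload = value;
	__kernel_dmb();		/* order the payload store before the flag store */
	ready = 1;
}
```

On a pre-ARMv6 uniprocessor kernel the helper is effectively a plain return; on an SMP ARMv6 kernel the same call executes a dmb, which is exactly the forward compatibility the commit message describes.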
Diffstat (limited to 'arch')
 arch/arm/kernel/entry-armv.S | 49 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 49 insertions(+), 0 deletions(-)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d9fb819bf7c..2a8d27e18fa 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -614,6 +614,47 @@ __kuser_helper_start:
 /*
  * Reference prototype:
  *
+ *	void __kernel_memory_barrier(void)
+ *
+ * Input:
+ *
+ *	lr = return address
+ *
+ * Output:
+ *
+ *	none
+ *
+ * Clobbered:
+ *
+ *	the Z flag might be lost
+ *
+ * Definition and user space usage example:
+ *
+ *	typedef void (__kernel_dmb_t)(void);
+ *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
+ *
+ * Apply any needed memory barrier to preserve consistency with data modified
+ * manually and __kuser_cmpxchg usage.
+ *
+ * This could be used as follows:
+ *
+ * #define __kernel_dmb() \
+ *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
+ *	        : : : "lr","cc" )
+ */
+
+__kuser_memory_barrier:				@ 0xffff0fa0
+
+#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#endif
+	mov	pc, lr
+
+	.align	5
+
+/*
+ * Reference prototype:
+ *
  *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
  *
  * Input:
@@ -642,6 +683,8 @@ __kuser_helper_start:
  * The C flag is also set if *ptr was changed to allow for assembly
  * optimization in the calling code.
  *
+ * Note: this routine already includes memory barriers as needed.
+ *
  * For example, a user space atomic_add implementation could look like this:
  *
  * #define atomic_add(ptr, val) \
@@ -698,10 +741,16 @@ __kuser_cmpxchg:				@ 0xffff0fc0
 
 #else
 
+#ifdef CONFIG_SMP
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#endif
 	ldrex	r3, [r2]
 	subs	r3, r3, r0
 	strexeq	r3, r1, [r2]
 	rsbs	r0, r3, #0
+#ifdef CONFIG_SMP
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#endif
 	mov	pc, lr
 
 #endif
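The note added to __kuser_cmpxchg above means callers now get the required barriers for free. Here is a sketch of the atomic_add-style usage the comment alludes to, written as a C function rather than the macro form; it assumes the documented kuser ABI in which the helper returns zero when *ptr was updated (the function name and loop structure are illustrative):

```c
/* cmpxchg kuser helper at its fixed address, per the comment block above. */
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

/* Atomically add 'val' to '*ptr'; memory barriers come from the helper. */
static int atomic_add_return(int val, int *ptr)
{
	int old;
	do {
		old = *ptr;
	} while (__kernel_cmpxchg(old, old + val, ptr) != 0);
	return old + val;
}
```

Before this patch, such a loop running on an SMP ARMv6 kernel could race with other CPUs around the exchange; with the dmb instructions folded into the helper, the same pre-ARMv6 binary becomes SMP-safe without recompilation.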