author		Paul Mackerras <paulus@samba.org>	2006-09-13 22:08:26 +1000
committer	Paul Mackerras <paulus@samba.org>	2006-09-13 22:08:26 +1000
commit		f007cacffc8870702a1473d83ba5e4922d54e17c (patch)
tree		7faa1dbd7ccd2c4536f29852e0fedf7499d90508 /include/asm-powerpc/spinlock.h
parent		2e8e8dacc566cc91cd8707cb092e76c7bbfab178 (diff)
[POWERPC] Fix MMIO ops to provide expected barrier behaviour
This changes the writeX family of functions to have a sync instruction
before the MMIO store rather than after, because the generally expected
behaviour is that the device receiving the MMIO store can be guaranteed
to see the effects of any preceding writes to normal memory.
To preserve ordering between writeX and readX, and to preserve ordering
between preceding stores and the readX, the readX family of functions
has had a sync added before the load.
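As an illustration of the new ordering (not the actual io.h macros, which also handle byte-swapping and add a trap/isync sequence after the load; the sketch_* names are hypothetical and only the kernel types u32/__iomem/__force are assumed):

static inline void sketch_writel(u32 val, volatile void __iomem *addr)
{
	__asm__ __volatile__("sync" : : : "memory");	/* drain preceding cacheable stores first */
	*(volatile u32 __force *)addr = val;		/* then issue the MMIO store */
}

static inline u32 sketch_readl(const volatile void __iomem *addr)
{
	__asm__ __volatile__("sync" : : : "memory");	/* order the load after preceding stores and writeX */
	return *(const volatile u32 __force *)addr;	/* MMIO load */
}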
Although writeX followed by spin_unlock is not officially guaranteed
to keep the writeX inside the spin-locked region unless an mmiowb()
is used, there are currently drivers that depend on the previous
behaviour on powerpc, which was that the mmiowb wasn't actually required.
Therefore we have a per-cpu flag that is set by writeX, cleared by
__raw_spin_lock and mmiowb, and tested by __raw_spin_unlock. If it is
set, __raw_spin_unlock does a sync and clears it.
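Extending the sketch above, the write side of that protocol looks roughly like this (hypothetical sketch only; the real writeX and mmiowb() definitions live in include/asm-powerpc/io.h, while the SYNC_IO/CLEAR_IO_SYNC side appears in the spinlock.h hunks below):

static inline void sketch_writel(u32 val, volatile void __iomem *addr)
{
	__asm__ __volatile__("sync" : : : "memory");
	*(volatile u32 __force *)addr = val;
	get_paca()->io_sync = 1;	/* an MMIO store may still be in flight */
}

static inline void sketch_mmiowb(void)
{
	__asm__ __volatile__("sync" : : : "memory");	/* push the MMIO store to the device now */
	get_paca()->io_sync = 0;	/* so __raw_spin_unlock can skip its extra sync */
}

A driver that does writel() under a spinlock thus keeps the old behaviour for free: the unlock sees io_sync set and issues a full sync before releasing the lock.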
This changes both 32-bit and 64-bit readX/writeX. 32-bit already has a
sync in __raw_spin_unlock (since lwsync doesn't exist on 32-bit), and thus
doesn't need the per-cpu flag.
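That follows from how the unlock barrier expands; roughly (approximate sketch, the real definitions are in include/asm-powerpc/synch.h and may differ in detail):

#ifdef __powerpc64__
#define LWSYNC	lwsync		/* lighter-weight barrier, 64-bit only */
#else
#define LWSYNC	sync		/* no lwsync on 32-bit, so unlock already does a full sync */
#endif

#ifdef CONFIG_SMP
#define LWSYNC_ON_SMP	__stringify(LWSYNC) "\n"
#else
#define LWSYNC_ON_SMP
#endif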
Tested on G5 (PPC970) and POWER5.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'include/asm-powerpc/spinlock.h')
-rw-r--r--	include/asm-powerpc/spinlock.h | 17
1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/include/asm-powerpc/spinlock.h b/include/asm-powerpc/spinlock.h
index 895cb6d3a42..c31e4382a77 100644
--- a/include/asm-powerpc/spinlock.h
+++ b/include/asm-powerpc/spinlock.h
@@ -36,6 +36,19 @@
 #define LOCK_TOKEN	1
 #endif
 
+#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
+#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
+#define SYNC_IO		do {						\
+				if (unlikely(get_paca()->io_sync)) {	\
+					mb();				\
+					get_paca()->io_sync = 0;	\
+				}					\
+			} while (0)
+#else
+#define CLEAR_IO_SYNC
+#define SYNC_IO
+#endif
+
 /*
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
@@ -61,6 +74,7 @@ static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
 
 static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
 {
+	CLEAR_IO_SYNC;
 	return __spin_trylock(lock) == 0;
 }
 
@@ -91,6 +105,7 @@ extern void __rw_yield(raw_rwlock_t *lock);
 
 static void __inline__ __raw_spin_lock(raw_spinlock_t *lock)
 {
+	CLEAR_IO_SYNC;
 	while (1) {
 		if (likely(__spin_trylock(lock) == 0))
 			break;
@@ -107,6 +122,7 @@ static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long
 {
 	unsigned long flags_dis;
 
+	CLEAR_IO_SYNC;
 	while (1) {
 		if (likely(__spin_trylock(lock) == 0))
 			break;
@@ -124,6 +140,7 @@ static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long
 
 static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
 {
+	SYNC_IO;
 	__asm__ __volatile__("# __raw_spin_unlock\n\t"
 				LWSYNC_ON_SMP: : :"memory");
 	lock->slock = 0;