author    Catalin Marinas <catalin.marinas@arm.com>    2006-01-12 16:53:51 +0000
committer Russell King <rmk+kernel@arm.linux.org.uk>   2006-01-12 16:53:51 +0000
commit    90303b102353302e84758f245906368907e6a23b
tree      3e417666985ee5875c2d3435518de2c4bdc9b88d /arch/arm/lib
parent    ece5f7b3c4fde70a1ae4add7372ebca5c90bc34d
[ARM] 3256/1: Make the function-returning ldm's use sp as the base register
Patch from Catalin Marinas

If the low interrupt latency mode is enabled for the CPU (from ARMv6
onwards), the ldm/stm instructions are no longer atomic. An ldm instruction
restoring the sp and pc registers can be interrupted immediately after sp
was updated but before the pc. If this happens, the CPU restores the base
register to the value before the ldm instruction, but if the base register
is not sp, the interrupt routine will corrupt the stack and the restarted
ldm instruction will load garbage.

Note that future ARM cores might always run in the low interrupt latency
mode.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
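For illustration, a minimal sketch of the epilogue pattern this patch
replaces and the sp-based one it introduces (abbreviated register list;
not code taken from the patch itself). With the usual APCS prologue, the
saved ip slot holds the caller's sp, so the epilogue can reload the frame
through either fp or sp:

	@ prologue: ip preserves the original sp across the push
		mov	ip, sp
		stmfd	sp!, {r4, fp, ip, lr, pc}
		sub	fp, ip, #4

	@ before this patch: return ldm based on fp. If an interrupt lands
	@ after sp is loaded but before pc, the base (fp) is rolled back
	@ while sp keeps the caller's value; the handler then pushes over
	@ the still-needed frame and the restarted ldm loads garbage.
		ldmea	fp, {r4, fp, sp, pc}

	@ after this patch: the same restore, based on sp with no writeback.
	@ On rollback the base (sp) returns to the bottom of the frame, an
	@ interrupting handler pushes below it, and the restart is safe.
		ldmfd	sp, {r4, fp, sp, pc}

Both sequences load exactly the same frame slots; only the choice of base
register differs, and that is what makes the restarted ldm safe.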
Diffstat (limited to 'arch/arm/lib')
-rw-r--r--  arch/arm/lib/csumpartialcopy.S        | 6
-rw-r--r--  arch/arm/lib/csumpartialcopygeneric.S | 6
-rw-r--r--  arch/arm/lib/csumpartialcopyuser.S    | 8
3 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/arch/arm/lib/csumpartialcopy.S b/arch/arm/lib/csumpartialcopy.S
index 990ee63b246..21effe0dbf9 100644
--- a/arch/arm/lib/csumpartialcopy.S
+++ b/arch/arm/lib/csumpartialcopy.S
@@ -18,11 +18,13 @@
*/
.macro save_regs
+ mov ip, sp
stmfd sp!, {r1, r4 - r8, fp, ip, lr, pc}
+ sub fp, ip, #4
.endm
- .macro load_regs,flags
- LOADREGS(\flags,fp,{r1, r4 - r8, fp, sp, pc})
+ .macro load_regs
+ ldmfd sp, {r1, r4 - r8, fp, sp, pc}
.endm
.macro load1b, reg1
diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S
index 4a4609c1909..c50e8f5285d 100644
--- a/arch/arm/lib/csumpartialcopygeneric.S
+++ b/arch/arm/lib/csumpartialcopygeneric.S
@@ -23,7 +23,7 @@ len .req r2
sum .req r3
.Lzero: mov r0, sum
- load_regs ea
+ load_regs
/*
* Align an unaligned destination pointer. We know that
@@ -87,9 +87,7 @@ sum .req r3
b .Ldone
FN_ENTRY
- mov ip, sp
save_regs
- sub fp, ip, #4
cmp len, #8 @ Ensure that we have at least
blo .Lless8 @ 8 bytes to copy.
@@ -163,7 +161,7 @@ FN_ENTRY
ldr sum, [sp, #0] @ dst
tst sum, #1
movne r0, r0, ror #8
- load_regs ea
+ load_regs
.Lsrc_not_aligned:
adc sum, sum, #0 @ include C from dst alignment
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 333bca292de..c3b93e22ea2 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -18,11 +18,13 @@
.text
.macro save_regs
+ mov ip, sp
stmfd sp!, {r1 - r2, r4 - r8, fp, ip, lr, pc}
+ sub fp, ip, #4
.endm
- .macro load_regs,flags
- ldm\flags fp, {r1, r2, r4-r8, fp, sp, pc}
+ .macro load_regs
+ ldmfd sp, {r1, r2, r4-r8, fp, sp, pc}
.endm
.macro load1b, reg1
@@ -100,5 +102,5 @@
6002: teq r2, r1
strneb r0, [r1], #1
bne 6002b
- load_regs ea
+ load_regs
.previous