author    Benjamin Herrenschmidt <benh@kernel.crashing.org>    2007-05-10 22:22:45 -0700
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-05-11 08:29:34 -0700
commit    e1fa2e136ff64a3814a98c03d46320b9e80d29c8 (patch)
tree      14620d6f3361274139cccded4b38dc7e0347a593
parent    2acdb1694494eb6f17b44b2b3065879af32d0d46 (diff)
powerpc: fixup hard_irq_disable semantics
This patch renames the raw hard_irq_{enable,disable} into __hard_irq_{enable,disable} and introduces a higher level hard_irq_disable() function that can be used by any code to enforce that IRQs are fully disabled, not only lazy disabled.

The difference with the __ versions is that it updates some per-processor fields so that the kernel keeps track and properly re-enables them in the next local_irq_enable().

This prepares powerpc for my next patch that introduces hard_irq_disable() generically.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
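For illustration, a minimal sketch (not part of this commit) of how a PPC64 caller might use the new macro; the function name example_power_save() is hypothetical, while hard_irq_disable() and the PACA fields it touches come from the hw_irq.h hunk below:

/*
 * Illustrative sketch only, assuming a PPC64 configuration with lazy
 * interrupt disabling.  example_power_save() is an invented name.
 */
static void example_power_save(void)
{
	/*
	 * Clear MSR_EE and record soft_enabled = hard_enabled = 0 in the
	 * PACA, so the lazy-disable bookkeeping stays consistent.
	 */
	hard_irq_disable();

	/* ... enter the low-power state here ... */

	/*
	 * No explicit re-enable: the local_irq_enable() done by the caller
	 * on return sees the PACA state and hard re-enables interrupts.
	 */
}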
-rw-r--r--  arch/powerpc/kernel/irq.c                |  2
-rw-r--r--  arch/powerpc/kernel/swsusp.c             |  4
-rw-r--r--  arch/powerpc/platforms/cell/pervasive.c  |  6
-rw-r--r--  include/asm-powerpc/hw_irq.h             | 11
4 files changed, 12 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 9ed4931af16..068377a2a8d 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -173,7 +173,7 @@ void local_irq_restore(unsigned long en)
lv1_get_version_info(&tmp);
}
- hard_irq_enable();
+ __hard_irq_enable();
}
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/swsusp.c b/arch/powerpc/kernel/swsusp.c
index 064a7ba4f02..77b7b34b595 100644
--- a/arch/powerpc/kernel/swsusp.c
+++ b/arch/powerpc/kernel/swsusp.c
@@ -36,8 +36,4 @@ void restore_processor_state(void)
#ifdef CONFIG_PPC32
set_context(current->active_mm->context.id, current->active_mm->pgd);
#endif
-
-#ifdef CONFIG_PPC64
- hard_irq_enable();
-#endif
}
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index 8c20f0fb865..812bf563ed6 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -43,12 +43,10 @@ static void cbe_power_save(void)
unsigned long ctrl, thread_switch_control;
/*
- * We need to hard disable interrupts, but we also need to mark them
- * hard disabled in the PACA so that the local_irq_enable() done by
- * our caller upon return propertly hard enables.
+ * We need to hard disable interrupts, the local_irq_enable() done by
+ * our caller upon return will hard re-enable.
*/
hard_irq_disable();
- get_paca()->hard_enabled = 0;
ctrl = mfspr(SPRN_CTRLF);
diff --git a/include/asm-powerpc/hw_irq.h b/include/asm-powerpc/hw_irq.h
index 9e4dd98eb22..a7b60bf639e 100644
--- a/include/asm-powerpc/hw_irq.h
+++ b/include/asm-powerpc/hw_irq.h
@@ -48,8 +48,15 @@ extern void iseries_handle_interrupts(void);
#define irqs_disabled() (local_get_flags() == 0)
-#define hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1)
-#define hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1)
+#define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1)
+#define __hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1)
+
+#define hard_irq_disable() \
+ do { \
+ __hard_irq_disable(); \
+ get_paca()->soft_enabled = 0; \
+ get_paca()->hard_enabled = 0; \
+ } while(0)
#else