author     Linus Torvalds <torvalds@woody.osdl.org>  2006-12-10 10:00:00 -0800
committer  Linus Torvalds <torvalds@woody.osdl.org>  2006-12-10 10:00:00 -0800
commit     edb16bec41db68b22799a5fbad82c3891e637565 (patch)
tree       d019d2fa8fbf374d810f66e1a210648e53b0c593 /include
parent     bb7320d1d96dc2e479180ae8e7a112caf0726ace (diff)
parent     f0882589666440d573f657cb3a1d5f66f3caa157 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6:
  [SPARC64]: Fix several kprobes bugs.
  [SPARC64]: Update defconfig.
  [SPARC64]: dma remove extra brackets
  [SPARC{32,64}]: Propagate ptrace_traceme() return value.
  [SPARC64]: Replace kmalloc+memset with kzalloc
  [SPARC]: Check kzalloc() return value in SUN4D irq/iommu init.
  [SPARC]: Replace kmalloc+memset with kzalloc
  [SPARC64]: Run ctrl-alt-del action for sun4v powerdown request.
  [SPARC64]: Unaligned accesses to userspace are hard errors.
  [SPARC64]: Call do_mathemu on illegal instruction traps too.
  [SPARC64]: Update defconfig.
  [SPARC64]: Add irqtrace/stacktrace/lockdep support.
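Several of the merged patches convert an open-coded kmalloc()+memset() pair into a single kzalloc() call. A minimal sketch of that conversion, using an illustrative structure name rather than one taken from the patches:

    #include <linux/slab.h>

    struct example_state *p;    /* 'example_state' is hypothetical */

    /* Before: allocate, check, then zero in a separate step. */
    p = kmalloc(sizeof(*p), GFP_KERNEL);
    if (!p)
            return -ENOMEM;
    memset(p, 0, sizeof(*p));

    /* After: kzalloc() returns already-zeroed memory. */
    p = kzalloc(sizeof(*p), GFP_KERNEL);
    if (!p)
            return -ENOMEM;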
Diffstat (limited to 'include')
-rw-r--r--  include/asm-sparc64/dma.h       |  6
-rw-r--r--  include/asm-sparc64/irqflags.h  | 89
-rw-r--r--  include/asm-sparc64/kprobes.h   | 11
-rw-r--r--  include/asm-sparc64/rwsem.h     | 32
-rw-r--r--  include/asm-sparc64/system.h    | 49
-rw-r--r--  include/asm-sparc64/ttable.h    | 45
6 files changed, 169 insertions(+), 63 deletions(-)
diff --git a/include/asm-sparc64/dma.h b/include/asm-sparc64/dma.h
index 27f65972b3b..93e5a062df8 100644
--- a/include/asm-sparc64/dma.h
+++ b/include/asm-sparc64/dma.h
@@ -152,9 +152,9 @@ extern void dvma_init(struct sbus_bus *);
#define DMA_MAXEND(addr) (0x01000000UL-(((unsigned long)(addr))&0x00ffffffUL))
/* Yes, I hack a lot of elisp in my spare time... */
-#define DMA_ERROR_P(regs) (((sbus_readl((regs) + DMA_CSR) & DMA_HNDL_ERROR))
-#define DMA_IRQ_P(regs) (((sbus_readl((regs) + DMA_CSR)) & (DMA_HNDL_INTR | DMA_HNDL_ERROR)))
-#define DMA_WRITE_P(regs) (((sbus_readl((regs) + DMA_CSR) & DMA_ST_WRITE))
+#define DMA_ERROR_P(regs) ((sbus_readl((regs) + DMA_CSR) & DMA_HNDL_ERROR))
+#define DMA_IRQ_P(regs) ((sbus_readl((regs) + DMA_CSR)) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
+#define DMA_WRITE_P(regs) ((sbus_readl((regs) + DMA_CSR) & DMA_ST_WRITE))
#define DMA_OFF(__regs) \
do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
tmp &= ~DMA_ENABLE; \
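The dma.h hunk drops the stray opening parenthesis from DMA_ERROR_P() and DMA_WRITE_P(); with the old, unbalanced definitions any expansion of those macros failed to compile. A hedged sketch of a caller using the corrected macros, with the handler name and error policy invented for illustration:

    /* Hypothetical interrupt path; not taken from a real driver. */
    static irqreturn_t example_sbus_dma_irq(void __iomem *regs)
    {
            if (!DMA_IRQ_P(regs))
                    return IRQ_NONE;        /* not our interrupt */

            if (DMA_ERROR_P(regs))
                    DMA_OFF(regs);          /* illustrative: stop the engine on error */

            return IRQ_HANDLED;
    }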
diff --git a/include/asm-sparc64/irqflags.h b/include/asm-sparc64/irqflags.h
new file mode 100644
index 00000000000..024fc54d068
--- /dev/null
+++ b/include/asm-sparc64/irqflags.h
@@ -0,0 +1,89 @@
+/*
+ * include/asm-sparc64/irqflags.h
+ *
+ * IRQ flags handling
+ *
+ * This file gets included from lowlevel asm headers too, to provide
+ * wrapped versions of the local_irq_*() APIs, based on the
+ * raw_local_irq_*() functions from the lowlevel headers.
+ */
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#ifndef __ASSEMBLY__
+
+static inline unsigned long __raw_local_save_flags(void)
+{
+ unsigned long flags;
+
+ __asm__ __volatile__(
+ "rdpr %%pil, %0"
+ : "=r" (flags)
+ );
+
+ return flags;
+}
+
+#define raw_local_save_flags(flags) \
+ do { (flags) = __raw_local_save_flags(); } while (0)
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+ __asm__ __volatile__(
+ "wrpr %0, %%pil"
+ : /* no output */
+ : "r" (flags)
+ : "memory"
+ );
+}
+
+static inline void raw_local_irq_disable(void)
+{
+ __asm__ __volatile__(
+ "wrpr 15, %%pil"
+ : /* no outputs */
+ : /* no inputs */
+ : "memory"
+ );
+}
+
+static inline void raw_local_irq_enable(void)
+{
+ __asm__ __volatile__(
+ "wrpr 0, %%pil"
+ : /* no outputs */
+ : /* no inputs */
+ : "memory"
+ );
+}
+
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+ return (flags > 0);
+}
+
+static inline int raw_irqs_disabled(void)
+{
+ unsigned long flags = __raw_local_save_flags();
+
+ return raw_irqs_disabled_flags(flags);
+}
+
+/*
+ * For spinlocks, etc:
+ */
+static inline unsigned long __raw_local_irq_save(void)
+{
+ unsigned long flags = __raw_local_save_flags();
+
+ raw_local_irq_disable();
+
+ return flags;
+}
+
+#define raw_local_irq_save(flags) \
+ do { (flags) = __raw_local_irq_save(); } while (0)
+
+#endif /* (__ASSEMBLY__) */
+
+#endif /* !(_ASM_IRQFLAGS_H) */
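The new header supplies the raw_* primitives that linux/irqflags.h wraps into the generic local_irq_*() API (and, with CONFIG_TRACE_IRQFLAGS, instruments for lockdep). The usage pattern in ordinary code is unchanged; a minimal sketch, with the critical-section body left illustrative:

    unsigned long flags;

    local_irq_save(flags);          /* saves %pil, then raises it to 15 */
    /* ... per-CPU work that must not be interrupted ... */
    local_irq_restore(flags);       /* writes the saved value back to %pil */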
diff --git a/include/asm-sparc64/kprobes.h b/include/asm-sparc64/kprobes.h
index c9f5c34d318..becc38fa06c 100644
--- a/include/asm-sparc64/kprobes.h
+++ b/include/asm-sparc64/kprobes.h
@@ -13,7 +13,11 @@ typedef u32 kprobe_opcode_t;
#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
#define arch_remove_kprobe(p) do {} while (0)
#define ARCH_INACTIVE_KPROBE_COUNT 0
-#define flush_insn_slot(p) do { } while (0)
+
+#define flush_insn_slot(p) \
+do { flushi(&(p)->ainsn.insn[0]); \
+ flushi(&(p)->ainsn.insn[1]); \
+} while (0)
/* Architecture specific copy of original instruction*/
struct arch_specific_insn {
@@ -23,7 +27,7 @@ struct arch_specific_insn {
struct prev_kprobe {
struct kprobe *kp;
- unsigned int status;
+ unsigned long status;
unsigned long orig_tnpc;
unsigned long orig_tstate_pil;
};
@@ -33,10 +37,7 @@ struct kprobe_ctlblk {
unsigned long kprobe_status;
unsigned long kprobe_orig_tnpc;
unsigned long kprobe_orig_tstate_pil;
- long *jprobe_saved_esp;
struct pt_regs jprobe_saved_regs;
- struct pt_regs *jprobe_saved_regs_location;
- struct sparc_stackf jprobe_saved_stack;
struct prev_kprobe prev_kprobe;
};
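flush_insn_slot() changes from a no-op into an I-cache flush of both words of the copied instruction slot. A hedged sketch of the kind of call site it serves, modelled on the usual arch_prepare_kprobe() flow rather than copied from the sparc64 sources:

    /* Illustrative only; BREAKPOINT_INSTRUCTION_2 is assumed to be the
     * architecture's second breakpoint opcode. */
    static int example_prepare_kprobe(struct kprobe *p)
    {
            p->ainsn.insn[0] = *p->addr;                    /* copy the probed instruction */
            p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;    /* trap back after single-step */
            flush_insn_slot(p);     /* make both words visible to instruction fetch */
            p->opcode = *p->addr;
            return 0;
    }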
diff --git a/include/asm-sparc64/rwsem.h b/include/asm-sparc64/rwsem.h
index cef5e827042..1294b7ce5d0 100644
--- a/include/asm-sparc64/rwsem.h
+++ b/include/asm-sparc64/rwsem.h
@@ -23,20 +23,33 @@ struct rw_semaphore {
signed int count;
spinlock_t wait_lock;
struct list_head wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
};
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
#define __RWSEM_INITIALIZER(name) \
-{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) }
+{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
+ __RWSEM_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-static __inline__ void init_rwsem(struct rw_semaphore *sem)
-{
- sem->count = RWSEM_UNLOCKED_VALUE;
- spin_lock_init(&sem->wait_lock);
- INIT_LIST_HEAD(&sem->wait_list);
-}
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+ struct lock_class_key *key);
+
+#define init_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __init_rwsem((sem), #sem, &__key); \
+} while (0)
extern void __down_read(struct rw_semaphore *sem);
extern int __down_read_trylock(struct rw_semaphore *sem);
@@ -46,6 +59,11 @@ extern void __up_read(struct rw_semaphore *sem);
extern void __up_write(struct rw_semaphore *sem);
extern void __downgrade_write(struct rw_semaphore *sem);
+static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+{
+ __down_write(sem);
+}
+
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
return atomic_add_return(delta, (atomic_t *)(&sem->count));
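The rwsem changes add a lockdep dep_map to the structure and turn init_rwsem() into a macro so that each initialisation site gets its own static lock_class_key. Callers are unaffected; a minimal sketch with an invented containing structure:

    struct example_dev {
            struct rw_semaphore lock;
            /* ... */
    };

    static void example_dev_setup(struct example_dev *dev)
    {
            init_rwsem(&dev->lock); /* one lock class per init_rwsem() call site */
    }

    static void example_read_side(struct example_dev *dev)
    {
            down_read(&dev->lock);
            /* ... read-side access, tracked by lockdep ... */
            up_read(&dev->lock);
    }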
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index a8b7432c9a7..32281acb878 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -7,6 +7,9 @@
#include <asm/visasm.h>
#ifndef __ASSEMBLY__
+
+#include <linux/irqflags.h>
+
/*
* Sparc (general) CPU types
*/
@@ -72,52 +75,6 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
#endif
-#define setipl(__new_ipl) \
- __asm__ __volatile__("wrpr %0, %%pil" : : "r" (__new_ipl) : "memory")
-
-#define local_irq_disable() \
- __asm__ __volatile__("wrpr 15, %%pil" : : : "memory")
-
-#define local_irq_enable() \
- __asm__ __volatile__("wrpr 0, %%pil" : : : "memory")
-
-#define getipl() \
-({ unsigned long retval; __asm__ __volatile__("rdpr %%pil, %0" : "=r" (retval)); retval; })
-
-#define swap_pil(__new_pil) \
-({ unsigned long retval; \
- __asm__ __volatile__("rdpr %%pil, %0\n\t" \
- "wrpr %1, %%pil" \
- : "=&r" (retval) \
- : "r" (__new_pil) \
- : "memory"); \
- retval; \
-})
-
-#define read_pil_and_cli() \
-({ unsigned long retval; \
- __asm__ __volatile__("rdpr %%pil, %0\n\t" \
- "wrpr 15, %%pil" \
- : "=r" (retval) \
- : : "memory"); \
- retval; \
-})
-
-#define local_save_flags(flags) ((flags) = getipl())
-#define local_irq_save(flags) ((flags) = read_pil_and_cli())
-#define local_irq_restore(flags) setipl((flags))
-
-/* On sparc64 IRQ flags are the PIL register. A value of zero
- * means all interrupt levels are enabled, any other value means
- * only IRQ levels greater than that value will be received.
- * Consequently this means that the lowest IRQ level is one.
- */
-#define irqs_disabled() \
-({ unsigned long flags; \
- local_save_flags(flags);\
- (flags > 0); \
-})
-
#define nop() __asm__ __volatile__ ("nop")
#define read_barrier_depends() do { } while(0)
diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h
index f2352606a79..c2a16e18849 100644
--- a/include/asm-sparc64/ttable.h
+++ b/include/asm-sparc64/ttable.h
@@ -137,10 +137,49 @@
#endif
#define BREAKPOINT_TRAP TRAP(breakpoint_trap)
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+#define TRAP_IRQ(routine, level) \
+ rdpr %pil, %g2; \
+ wrpr %g0, 15, %pil; \
+ sethi %hi(1f-4), %g7; \
+ ba,pt %xcc, etrap_irq; \
+ or %g7, %lo(1f-4), %g7; \
+ nop; \
+ nop; \
+ nop; \
+ .subsection 2; \
+1: call trace_hardirqs_off; \
+ nop; \
+ mov level, %o0; \
+ call routine; \
+ add %sp, PTREGS_OFF, %o1; \
+ ba,a,pt %xcc, rtrap_irq; \
+ .previous;
+
+#define TICK_SMP_IRQ \
+ rdpr %pil, %g2; \
+ wrpr %g0, 15, %pil; \
+ sethi %hi(1f-4), %g7; \
+ ba,pt %xcc, etrap_irq; \
+ or %g7, %lo(1f-4), %g7; \
+ nop; \
+ nop; \
+ nop; \
+ .subsection 2; \
+1: call trace_hardirqs_off; \
+ nop; \
+ call smp_percpu_timer_interrupt; \
+ add %sp, PTREGS_OFF, %o0; \
+ ba,a,pt %xcc, rtrap_irq; \
+ .previous;
+
+#else
+
#define TRAP_IRQ(routine, level) \
rdpr %pil, %g2; \
wrpr %g0, 15, %pil; \
- b,pt %xcc, etrap_irq; \
+ ba,pt %xcc, etrap_irq; \
rd %pc, %g7; \
mov level, %o0; \
call routine; \
@@ -151,12 +190,14 @@
rdpr %pil, %g2; \
wrpr %g0, 15, %pil; \
sethi %hi(109f), %g7; \
- b,pt %xcc, etrap_irq; \
+ ba,pt %xcc, etrap_irq; \
109: or %g7, %lo(109b), %g7; \
call smp_percpu_timer_interrupt; \
add %sp, PTREGS_OFF, %o0; \
ba,a,pt %xcc, rtrap_irq;
+#endif
+
#define TRAP_IVEC TRAP_NOSAVE(do_ivec)
#define BTRAP(lvl) TRAP_ARG(bad_trap, lvl)
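The CONFIG_TRACE_IRQFLAGS variants of TRAP_IRQ and TICK_SMP_IRQ insert a call to trace_hardirqs_off() before the C handler runs, since the trap entry has just raised %pil to 15. In rough C terms (a paraphrase of the assembly above, not actual kernel entry code; 'routine' stands for the macro's handler argument):

    static void example_trap_irq_path(int level, struct pt_regs *regs)
    {
            trace_hardirqs_off();   /* tell lockdep that interrupts are now masked */
            routine(level, regs);   /* the handler named in TRAP_IRQ(routine, level) */
    }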