author    | Prasanna S Panchamukhi <prasanna@in.ibm.com> | 2005-09-06 15:19:28 -0700
committer | Linus Torvalds <torvalds@g5.osdl.org>        | 2005-09-07 16:57:59 -0700
commit    | 0f2fbdcbb041f9087da42f8ac2e81f2817098d2a (patch)
tree      | 3f54f91ca6972c6567cfe529b33fafb622b2d51c
parent    | 3d97ae5b958855ac007b6f56a0f94ab8ade09e9e (diff)
[PATCH] kprobes: prevent possible race conditions x86_64 changes
This patch contains the x86_64 architecture specific changes to prevent possible race conditions.
It marks the kprobes machinery and the exception handlers it depends on (int3, debug, page fault, general protection) with __kprobes/KPROBE_ENTRY so that they are placed in the .kprobes.text section and cannot themselves be probed.
Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
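
Every hunk in the diff below follows one pattern: functions involved in kprobe handling, and the exception paths they depend on, are tagged (__kprobes in C, KPROBE_ENTRY in assembly) so the linker collects them into a dedicated .kprobes.text section that the kprobes core can refuse to probe. The stand-alone C sketch below illustrates that mechanism outside the kernel; every identifier in it (demo_kprobes, demo_kprobes_text, in_protected_text, breakpoint_handler, ordinary_function) is invented for the example and is not part of this patch.

/*
 * Stand-alone illustration (not kernel code): placing functions in a
 * dedicated linker section lets a probing facility refuse to instrument
 * its own handlers.  All identifiers here are invented for this sketch.
 *
 * Build with gcc/GNU ld: the linker auto-generates __start_<section> and
 * __stop_<section> symbols for section names that are valid C identifiers,
 * which stand in for the boundary markers the kernel gets from KPROBES_TEXT.
 */
#include <stdio.h>

#define demo_kprobes __attribute__((section("demo_kprobes_text")))

extern const char __start_demo_kprobes_text[];
extern const char __stop_demo_kprobes_text[];

/* Plays the role of a handler such as kprobe_handler() or do_int3(). */
static void demo_kprobes breakpoint_handler(void)
{
        puts("breakpoint handled");
}

/* Plays the role of a regular function that may legitimately be probed. */
static void ordinary_function(void)
{
        puts("ordinary work");
}

/* The "may we probe this address?" check: reject anything inside the
 * protected section, the way the kprobes core excludes .kprobes.text. */
static int in_protected_text(const void *addr)
{
        return (const char *)addr >= __start_demo_kprobes_text &&
               (const char *)addr <  __stop_demo_kprobes_text;
}

int main(void)
{
        breakpoint_handler();
        ordinary_function();
        printf("probe breakpoint_handler? %s\n",
               in_protected_text((void *)breakpoint_handler) ? "rejected" : "allowed");
        printf("probe ordinary_function?  %s\n",
               in_protected_text((void *)ordinary_function) ? "rejected" : "allowed");
        return 0;
}

In the kernel, the names appearing in the diff play the corresponding roles: as far as the companion generic patch in this series defines them, __kprobes is a section attribute for C functions, KPROBE_ENTRY switches the assembler into .kprobes.text for an entry point (hence the added ".previous .text" after each use to switch back), and KPROBES_TEXT pulls the section into the image from the linker script.
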
-rw-r--r-- | arch/x86_64/kernel/entry.S       | 12
-rw-r--r-- | arch/x86_64/kernel/kprobes.c     | 35
-rw-r--r-- | arch/x86_64/kernel/traps.c       | 14
-rw-r--r-- | arch/x86_64/kernel/vmlinux.lds.S |  1
-rw-r--r-- | arch/x86_64/mm/fault.c           |  4
5 files changed, 39 insertions, 27 deletions
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index 096d470e280..be51dbe1f75 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -784,8 +784,9 @@ ENTRY(execve)
         ret
         CFI_ENDPROC
 
-ENTRY(page_fault)
+KPROBE_ENTRY(page_fault)
         errorentry do_page_fault
+        .previous .text
 
 ENTRY(coprocessor_error)
         zeroentry do_coprocessor_error
@@ -797,13 +798,14 @@ ENTRY(device_not_available)
         zeroentry math_state_restore
 
         /* runs on exception stack */
-ENTRY(debug)
+KPROBE_ENTRY(debug)
         CFI_STARTPROC
         pushq $0
         CFI_ADJUST_CFA_OFFSET 8
         paranoidentry do_debug
         jmp paranoid_exit
         CFI_ENDPROC
+        .previous .text
 
         /* runs on exception stack */
 ENTRY(nmi)
@@ -854,8 +856,9 @@ paranoid_schedule:
         jmp paranoid_userspace
         CFI_ENDPROC
 
-ENTRY(int3)
+KPROBE_ENTRY(int3)
         zeroentry do_int3
+        .previous .text
 
 ENTRY(overflow)
         zeroentry do_overflow
@@ -892,8 +895,9 @@ ENTRY(stack_segment)
         jmp paranoid_exit
         CFI_ENDPROC
 
-ENTRY(general_protection)
+KPROBE_ENTRY(general_protection)
         errorentry do_general_protection
+        .previous .text
 
 ENTRY(alignment_check)
         errorentry do_alignment_check
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 5c6dc705148..c21cceaea27 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -74,7 +74,7 @@ static inline int is_IF_modifier(kprobe_opcode_t *insn)
         return 0;
 }
 
-int arch_prepare_kprobe(struct kprobe *p)
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
         /* insn: must be on special executable page on x86_64. */
         up(&kprobe_mutex);
@@ -189,7 +189,7 @@ static inline s32 *is_riprel(u8 *insn)
         return NULL;
 }
 
-void arch_copy_kprobe(struct kprobe *p)
+void __kprobes arch_copy_kprobe(struct kprobe *p)
 {
         s32 *ripdisp;
         memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
@@ -215,21 +215,21 @@ void arch_copy_kprobe(struct kprobe *p)
         p->opcode = *p->addr;
 }
 
-void arch_arm_kprobe(struct kprobe *p)
+void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
         *p->addr = BREAKPOINT_INSTRUCTION;
         flush_icache_range((unsigned long) p->addr,
                            (unsigned long) p->addr + sizeof(kprobe_opcode_t));
 }
 
-void arch_disarm_kprobe(struct kprobe *p)
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
         *p->addr = p->opcode;
         flush_icache_range((unsigned long) p->addr,
                            (unsigned long) p->addr + sizeof(kprobe_opcode_t));
 }
 
-void arch_remove_kprobe(struct kprobe *p)
+void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
         up(&kprobe_mutex);
         free_insn_slot(p->ainsn.insn);
@@ -261,7 +261,7 @@ static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs)
                 kprobe_saved_rflags &= ~IF_MASK;
 }
 
-static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 {
         regs->eflags |= TF_MASK;
         regs->eflags &= ~IF_MASK;
@@ -272,7 +272,8 @@ static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
                 regs->rip = (unsigned long)p->ainsn.insn;
 }
 
-void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
+                                      struct pt_regs *regs)
 {
         unsigned long *sara = (unsigned long *)regs->rsp;
         struct kretprobe_instance *ri;
@@ -295,7 +296,7 @@ void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
  * Interrupts are disabled on entry as trap3 is an interrupt gate and they
  * remain disabled thorough out this function.
  */
-int kprobe_handler(struct pt_regs *regs)
+int __kprobes kprobe_handler(struct pt_regs *regs)
 {
         struct kprobe *p;
         int ret = 0;
@@ -399,7 +400,7 @@ no_kprobe:
 /*
  * Called when we hit the probe point at kretprobe_trampoline
  */
-int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
         struct kretprobe_instance *ri = NULL;
         struct hlist_head *head;
@@ -478,7 +479,7 @@ int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
  * that is atop the stack is the address following the copied instruction.
  * We need to make it the address following the original instruction.
  */
-static void resume_execution(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
         unsigned long *tos = (unsigned long *)regs->rsp;
         unsigned long next_rip = 0;
@@ -536,7 +537,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs)
  * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled thoroughout this function. And we hold kprobe lock.
 */
-int post_kprobe_handler(struct pt_regs *regs)
+int __kprobes post_kprobe_handler(struct pt_regs *regs)
 {
         if (!kprobe_running())
                 return 0;
@@ -571,7 +572,7 @@ out:
 }
 
 /* Interrupts disabled, kprobe_lock held. */
-int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
         if (current_kprobe->fault_handler
             && current_kprobe->fault_handler(current_kprobe, regs, trapnr))
@@ -590,8 +591,8 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 /*
  * Wrapper routine for handling exceptions.
  */
-int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
-                             void *data)
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+                                       unsigned long val, void *data)
 {
         struct die_args *args = (struct die_args *)data;
         switch (val) {
@@ -619,7 +620,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
         return NOTIFY_DONE;
 }
 
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
         struct jprobe *jp = container_of(p, struct jprobe, kp);
         unsigned long addr;
@@ -640,7 +641,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
         return 1;
 }
 
-void jprobe_return(void)
+void __kprobes jprobe_return(void)
 {
         preempt_enable_no_resched();
         asm volatile (" xchg %%rbx,%%rsp \n"
@@ -651,7 +652,7 @@ void jprobe_return(void)
                       (jprobe_saved_rsp):"memory");
 }
 
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
         u8 *addr = (u8 *) (regs->rip - 1);
         unsigned long stack_addr = (unsigned long)jprobe_saved_rsp;
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 6ead433a388..f238d6078a5 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/nmi.h>
+#include <linux/kprobes.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -419,8 +420,9 @@ void die_nmi(char *str, struct pt_regs *regs)
         do_exit(SIGSEGV);
 }
 
-static void do_trap(int trapnr, int signr, char *str,
-                    struct pt_regs * regs, long error_code, siginfo_t *info)
+static void __kprobes do_trap(int trapnr, int signr, char *str,
+                              struct pt_regs * regs, long error_code,
+                              siginfo_t *info)
 {
         conditional_sti(regs);
 
@@ -504,7 +506,8 @@ DO_ERROR(18, SIGSEGV, "reserved", reserved)
 DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
 DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
 
-asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
+asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
+                                                long error_code)
 {
         conditional_sti(regs);
 
@@ -622,7 +625,7 @@ asmlinkage void default_do_nmi(struct pt_regs *regs)
                 io_check_error(reason, regs);
 }
 
-asmlinkage void do_int3(struct pt_regs * regs, long error_code)
+asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
 {
         if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
                 return;
@@ -653,7 +656,8 @@ asmlinkage struct pt_regs *sync_regs(struct pt_regs *eregs)
 }
 
 /* runs on IST stack. */
-asmlinkage void do_debug(struct pt_regs * regs, unsigned long error_code)
+asmlinkage void __kprobes do_debug(struct pt_regs * regs,
+                                   unsigned long error_code)
 {
         unsigned long condition;
         struct task_struct *tsk = current;
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index 2a94f9b60b2..d4abb07af52 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -21,6 +21,7 @@ SECTIONS
         *(.text)
         SCHED_TEXT
         LOCK_TEXT
+        KPROBES_TEXT
         *(.fixup)
         *(.gnu.warning)
         } = 0x9090
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index ca914c3bd49..816732d8858 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -23,6 +23,7 @@
 #include <linux/vt_kern.h>    /* For unblank_screen() */
 #include <linux/compiler.h>
 #include <linux/module.h>
+#include <linux/kprobes.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -294,7 +295,8 @@ int exception_trace = 1;
  * bit 2 == 0 means kernel, 1 means user-mode
  * bit 3 == 1 means fault was an instruction fetch
  */
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+                                        unsigned long error_code)
 {
         struct task_struct *tsk;
         struct mm_struct *mm;
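
A note on why un-probeable handlers matter here (a hedged reading of the "race conditions" in the changelog, not text from the patch): the x86_64 kprobes code of this era keeps the state of the probe currently being handled in globals such as current_kprobe and kprobe_saved_rflags, both visible in the hunks above. If a probe could be planted inside the handling code itself, the resulting breakpoint would re-enter the handler and overwrite that saved state. The toy model below, with invented names (saved_state, handle_probe), shows that kind of re-entrancy clobbering a single saved slot.

/*
 * Toy model (not kernel code) of the re-entrancy problem.  A "handler"
 * that can itself be probed re-enters and overwrites the one saved slot;
 * the approach modeled by this patch is to make the handler un-probeable
 * rather than to make the saved state re-entrant.
 */
#include <stdio.h>

static int saved_state;          /* stands in for current_kprobe et al. */

static void handle_probe(int hit_state, int handler_is_probeable)
{
        saved_state = hit_state;     /* save state for the probe being handled */

        if (handler_is_probeable)    /* a probe inside the handler re-enters it */
                handle_probe(hit_state + 100, 0);

        /* When the outer invocation resumes, its saved state may be gone. */
        printf("resuming with saved_state=%d (expected %d)\n",
               saved_state, hit_state);
}

int main(void)
{
        handle_probe(1, 1);          /* probeable handler: state is clobbered */
        handle_probe(2, 0);          /* protected handler: state survives */
        return 0;
}
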