Diffstat (limited to 'arch/x86_64/kernel/process.c')
-rw-r--r-- | arch/x86_64/kernel/process.c | 37
1 file changed, 12 insertions, 25 deletions
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 22a05dec81a..70dd8e5c688 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -35,8 +35,8 @@
 #include <linux/ptrace.h>
 #include <linux/utsname.h>
 #include <linux/random.h>
-#include <linux/kprobes.h>
 #include <linux/notifier.h>
+#include <linux/kprobes.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -66,24 +66,17 @@ EXPORT_SYMBOL(boot_option_idle_override);
 void (*pm_idle)(void);
 static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 
-static struct notifier_block *idle_notifier;
-static DEFINE_SPINLOCK(idle_notifier_lock);
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
 
 void idle_notifier_register(struct notifier_block *n)
 {
-	unsigned long flags;
-	spin_lock_irqsave(&idle_notifier_lock, flags);
-	notifier_chain_register(&idle_notifier, n);
-	spin_unlock_irqrestore(&idle_notifier_lock, flags);
+	atomic_notifier_chain_register(&idle_notifier, n);
 }
 EXPORT_SYMBOL_GPL(idle_notifier_register);
 
 void idle_notifier_unregister(struct notifier_block *n)
 {
-	unsigned long flags;
-	spin_lock_irqsave(&idle_notifier_lock, flags);
-	notifier_chain_unregister(&idle_notifier, n);
-	spin_unlock_irqrestore(&idle_notifier_lock, flags);
+	atomic_notifier_chain_unregister(&idle_notifier, n);
 }
 EXPORT_SYMBOL(idle_notifier_unregister);
 
@@ -93,13 +86,13 @@ static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
 void enter_idle(void)
 {
 	__get_cpu_var(idle_state) = CPU_IDLE;
-	notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
 }
 
 static void __exit_idle(void)
 {
 	__get_cpu_var(idle_state) = CPU_NOT_IDLE;
-	notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
 }
 
 /* Called from interrupts to signify idle end */
@@ -114,7 +107,7 @@ void exit_idle(void)
  * We use this if we don't have any better
  * idle routine..
  */
-void default_idle(void)
+static void default_idle(void)
 {
 	local_irq_enable();
 
@@ -353,13 +346,6 @@ void exit_thread(void)
 	struct task_struct *me = current;
 	struct thread_struct *t = &me->thread;
 
-	/*
-	 * Remove function-return probe instances associated with this task
-	 * and put them back on the free list. Do not insert an exit probe for
-	 * this function, it will be disabled by kprobe_flush_task if you do.
-	 */
-	kprobe_flush_task(me);
-
 	if (me->thread.io_bitmap_ptr) {
 		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
 
@@ -508,7 +494,7 @@ out:
 /*
  * This special macro can be used to load a debugging register
  */
-#define loaddebug(thread,r) set_debug(thread->debugreg ## r, r)
+#define loaddebug(thread,r) set_debugreg(thread->debugreg ## r, r)
 
 /*
  * switch_to(x,y) should switch tasks from x to y.
@@ -527,8 +513,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
 
-	unlazy_fpu(prev_p);
-
 	/*
 	 * Reload esp0, LDT and the page table pointer:
 	 */
@@ -586,11 +570,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	}
 
 	/*
-	 * Switch the PDA context.
+	 * Switch the PDA and FPU contexts.
 	 */
 	prev->userrsp = read_pda(oldrsp);
 	write_pda(oldrsp, next->userrsp);
 	write_pda(pcurrent, next_p);
+	/* This must be here to ensure both math_state_restore() and
+	   kernel_fpu_begin() work consistently. */
+	unlazy_fpu(prev_p);
 	write_pda(kernelstack,
 		  task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
 
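
Note on the notifier hunks above: the idle chain moves from a hand-rolled notifier_block list guarded by idle_notifier_lock to an ATOMIC_NOTIFIER_HEAD, so registration, unregistration and invocation go through the atomic_notifier_* helpers, which do their own synchronization and run callbacks in atomic context. As a rough, hypothetical sketch of a client of this chain (not part of the diff; the module and handler names and the printk messages are invented, and <asm/idle.h> is assumed to declare IDLE_START/IDLE_END and the idle_notifier_*() functions):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/idle.h>	/* assumed: IDLE_START, IDLE_END, idle_notifier_*() */

/* Called through the atomic chain: must not sleep or take sleeping locks. */
static int example_idle_event(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	if (action == IDLE_START)
		printk(KERN_DEBUG "cpu entering idle\n");
	else if (action == IDLE_END)
		printk(KERN_DEBUG "cpu leaving idle\n");
	return NOTIFY_OK;
}

static struct notifier_block example_idle_nb = {
	.notifier_call = example_idle_event,
};

static int __init example_idle_init(void)
{
	idle_notifier_register(&example_idle_nb);
	return 0;
}

static void __exit example_idle_exit(void)
{
	idle_notifier_unregister(&example_idle_nb);
}

module_init(example_idle_init);
module_exit(example_idle_exit);
MODULE_LICENSE("GPL");

Because the atomic notifier helpers lock internally, callers no longer need idle_notifier_lock, which is why the spinlock and the flags bookkeeping disappear in the hunk above.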