author    | Ingo Molnar <mingo@elte.hu> | 2009-04-01 21:54:19 +0200
committer | Ingo Molnar <mingo@elte.hu> | 2009-04-02 00:49:02 +0200
commit    | 8302294f43250dc337108c51882a6007f2b1e2e0 (patch)
tree      | 85acd4440799c46a372df9cad170fa0c21e59096 /kernel/workqueue.c
parent    | 4fe70410d9a219dabb47328effccae7e7f2a6e26 (diff)
parent    | 2e572895bf3203e881356a4039ab0fa428ed2639 (diff)
Merge branch 'tracing/core-v2' into tracing-for-linus
Conflicts:
include/linux/slub_def.h
lib/Kconfig.debug
mm/slob.c
mm/slub.c
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 16 +++++++++++++++-
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9aedd9fd825..3003ecad08f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -33,6 +33,7 @@
 #include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
+#include <trace/workqueue.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -125,9 +126,13 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
 	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
 }
 
+DEFINE_TRACE(workqueue_insertion);
+
 static void insert_work(struct cpu_workqueue_struct *cwq,
 			struct work_struct *work, struct list_head *head)
 {
+	trace_workqueue_insertion(cwq->thread, work);
+
 	set_wq_data(work, cwq);
 	/*
 	 * Ensure that we get the right work->data if we see the
@@ -259,6 +264,8 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 }
 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 
+DEFINE_TRACE(workqueue_execution);
+
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
 	spin_lock_irq(&cwq->lock);
@@ -284,7 +291,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 	 */
 	struct lockdep_map lockdep_map = work->lockdep_map;
 #endif
-
+		trace_workqueue_execution(cwq->thread, work);
 		cwq->current_work = work;
 		list_del_init(cwq->worklist.next);
 		spin_unlock_irq(&cwq->lock);
@@ -765,6 +772,8 @@ init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
 	return cwq;
 }
 
+DEFINE_TRACE(workqueue_creation);
+
 static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
@@ -787,6 +796,8 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 	sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
 	cwq->thread = p;
 
+	trace_workqueue_creation(cwq->thread, cpu);
+
 	return 0;
 }
 
@@ -868,6 +879,8 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 }
 EXPORT_SYMBOL_GPL(__create_workqueue_key);
 
+DEFINE_TRACE(workqueue_destruction);
+
 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 {
 	/*
@@ -891,6 +904,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 	 * checks list_empty(), and a "normal" queue_work() can't use
 	 * a dead CPU.
 	 */
+	trace_workqueue_destruction(cwq->thread);
 	kthread_stop(cwq->thread);
 	cwq->thread = NULL;
 }
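
For context, the new include <trace/workqueue.h> is where the four tracepoints used above are declared. Below is a minimal sketch of what such a header looks like under the 2.6.29-era DECLARE_TRACE/DEFINE_TRACE API. The prototypes are inferred from the trace_workqueue_*() call sites in the diff, not copied from the real include/trace/workqueue.h, and the macro argument spelling (TP_PROTO/TP_ARGS vs. the older TPPROTO/TPARGS) changed between releases in this period, so treat this as illustrative:

/*
 * Sketch of include/trace/workqueue.h -- prototypes inferred from
 * the call sites in the diff above; illustrative, not authoritative.
 */
#ifndef __TRACE_WORKQUEUE_H
#define __TRACE_WORKQUEUE_H

#include <linux/tracepoint.h>
#include <linux/workqueue.h>
#include <linux/sched.h>

/* fired from insert_work(): a work item was queued on a worker thread */
DECLARE_TRACE(workqueue_insertion,
	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
	TP_ARGS(wq_thread, work));

/* fired from run_workqueue(): a work item is about to execute */
DECLARE_TRACE(workqueue_execution,
	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
	TP_ARGS(wq_thread, work));

/* fired from create_workqueue_thread(): a per-CPU worker was spawned */
DECLARE_TRACE(workqueue_creation,
	TP_PROTO(struct task_struct *wq_thread, int cpu),
	TP_ARGS(wq_thread, cpu));

/* fired from cleanup_workqueue_thread(): a worker is being torn down */
DECLARE_TRACE(workqueue_destruction,
	TP_PROTO(struct task_struct *wq_thread),
	TP_ARGS(wq_thread));

#endif /* __TRACE_WORKQUEUE_H */

DECLARE_TRACE() in the header pairs with the DEFINE_TRACE() instantiations visible in the diff; with CONFIG_TRACEPOINTS disabled, the trace_*() calls compile away to no-ops, which is why they can sit on hot paths like insert_work().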
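The DEFINE_TRACE() sites only emit events; a consumer (such as the workqueue tracer merged in by this branch) attaches a probe at runtime via the register_trace_<name>() functions that DECLARE_TRACE() generates in this kernel generation. A hypothetical minimal consumer module is sketched below -- probe_workqueue_insertion and its log message are made up for illustration:

/*
 * Hypothetical consumer module: logs every work-item insertion.
 * register_trace_workqueue_insertion() is generated by DECLARE_TRACE();
 * the probe name and message here are illustrative only.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <trace/workqueue.h>

static void probe_workqueue_insertion(struct task_struct *wq_thread,
				      struct work_struct *work)
{
	/*
	 * Runs synchronously from insert_work() with cwq->lock held,
	 * so keep the probe short and never sleep here.
	 */
	pr_info("workqueue: %s queued work %p\n", wq_thread->comm, work);
}

static int __init wq_probe_init(void)
{
	return register_trace_workqueue_insertion(probe_workqueue_insertion);
}

static void __exit wq_probe_exit(void)
{
	unregister_trace_workqueue_insertion(probe_workqueue_insertion);
	/* wait for in-flight probe calls before the module text goes away */
	tracepoint_synchronize_unregister();
}

module_init(wq_probe_init);
module_exit(wq_probe_exit);
MODULE_LICENSE("GPL");

This synchronous-probe design is why the trace calls in the diff pass cwq->thread: the probe gets the worker's task_struct directly, letting a tracer attribute queueing latency and execution to a specific kthread without any lookup of its own.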