author     Thomas Gleixner <tglx@linutronix.de>    2008-04-28 09:23:24 +0200
committer  Thomas Gleixner <tglx@linutronix.de>    2008-04-28 22:22:21 +0200
commit     0c96c5979a522c3323c30a078a70120e29b5bdbc (patch)
tree       1cd5cabe5a3591ce8f22640675921289298d0c40 /kernel
parent     e31a94ed371c70855eb30b77c490d6d85dd4da26 (diff)
hrtimer: raise softirq unlocked to avoid circular lock dependency
The scheduler hrtimer bits in 2.6.25 introduced a circular lock dependency
in a rare code path:

=======================================================
[ INFO: possible circular locking dependency detected ]
2.6.25-sched-devel.git-x86-latest.git #19
-------------------------------------------------------
X/2980 is trying to acquire lock:
 (&rq->rq_lock_key#2){++..}, at: [<ffffffff80230146>] task_rq_lock+0x56/0xa0

but task is already holding lock:
 (&cpu_base->lock){++..}, at: [<ffffffff80257ae1>] lock_hrtimer_base+0x31/0x60

which lock already depends on the new lock.

The scenario which leads to this is:

posix-timer signal is delivered
 -> posix-timer is rearmed
    timer is already expired in hrtimer_enqueue()
 -> softirq is raised

To prevent this we need to move the raise of the softirq out of the
base->lock protected code path.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable@kernel.org
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
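The pattern the fix uses is generic: while holding the lock, only record that the
side effect is needed, and perform it after the lock has been dropped, since the
side effect may acquire another lock in a conflicting order. Below is a minimal
user-space sketch of that pattern, assuming POSIX threads; the names (base_lock,
timer_pending, notify_worker, start_timer) are illustrative stand-ins, not kernel
APIs.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER;
static bool timer_pending;	/* protected by base_lock */

/* stand-in for raise_softirq(); may take other locks internally */
static void notify_worker(void)
{
	printf("worker notified with base_lock released\n");
}

static void start_timer(void)
{
	bool raise;

	pthread_mutex_lock(&base_lock);

	/* enqueue; the timer may immediately end up pending */
	timer_pending = true;

	/* record the decision while still under the lock ... */
	raise = timer_pending;

	pthread_mutex_unlock(&base_lock);

	/* ... and act on it only after the lock is dropped */
	if (raise)
		notify_worker();
}

int main(void)
{
	start_timer();
	return 0;
}

The patch below gives hrtimer_start() the same shape: the HRTIMER_STATE_PENDING
check happens under cpu_base->lock, while the softirq is raised only after
unlock_hrtimer_base().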
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/hrtimer.c | 19
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index e379ef0e9c2..dea4c9124ac 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -590,7 +590,6 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 			list_add_tail(&timer->cb_entry,
 				      &base->cpu_base->cb_pending);
 			timer->state = HRTIMER_STATE_PENDING;
-			raise_softirq(HRTIMER_SOFTIRQ);
 			return 1;
 		default:
 			BUG();
@@ -633,6 +632,11 @@ static int hrtimer_switch_to_hres(void)
 	return 1;
 }
 
+static inline void hrtimer_raise_softirq(void)
+{
+	raise_softirq(HRTIMER_SOFTIRQ);
+}
+
 #else
 
 static inline int hrtimer_hres_active(void) { return 0; }
@@ -651,6 +655,7 @@ static inline int hrtimer_reprogram(struct hrtimer *timer,
 {
 	return 0;
 }
+static inline void hrtimer_raise_softirq(void) { }
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
@@ -850,7 +855,7 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 {
 	struct hrtimer_clock_base *base, *new_base;
 	unsigned long flags;
-	int ret;
+	int ret, raise;
 
 	base = lock_hrtimer_base(timer, &flags);
 
@@ -884,8 +889,18 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 	enqueue_hrtimer(timer, new_base,
 			new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
 
+	/*
+	 * The timer may be expired and moved to the cb_pending
+	 * list. We can not raise the softirq with base lock held due
+	 * to a possible deadlock with runqueue lock.
+	 */
+	raise = timer->state == HRTIMER_STATE_PENDING;
+
 	unlock_hrtimer_base(timer, &flags);
 
+	if (raise)
+		hrtimer_raise_softirq();
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(hrtimer_start);