| author | Christoph Lameter <clameter@sgi.com> | 2006-12-10 02:20:21 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.osdl.org> | 2006-12-10 09:55:42 -0800 |
| commit | fe2eea3fafb3df2f5b8a55a48bcbb0d23b3b5618 | |
| tree | 5376e761d21d9164985f27e8836ec2b002dad8a1 | |
| parent | 4211a9a2e94a34df8c02bc39b7ec10678ad5c2ab | |
[PATCH] sched: disable interrupts for locking in load_balance()
Runqueue locks must be taken with interrupts disabled if we want to run
load_balance() with interrupts enabled: the same locks are also acquired
from interrupt context (scheduler_tick(), for one), so holding a runqueue
lock with interrupts on risks a deadlock on the local CPU. A minimal
sketch of the problem follows the sign-off lines.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Peter Williams <pwil3058@bigpond.net.au>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Cc: "Chen, Kenneth W" <kenneth.w.chen@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
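To make the rationale concrete, here is a minimal sketch (not part of the patch; the lock and counter names are hypothetical stand-ins for rq->lock and its protected state) of why a spinlock that is also taken from interrupt context must be acquired with interrupts disabled:

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);   /* hypothetical, stands in for rq->lock */
static unsigned long my_count;

/* Runs in hard-irq context, as scheduler_tick() does for rq->lock. */
static void my_irq_path(void)
{
	spin_lock(&my_lock);       /* interrupts are already off here */
	my_count++;
	spin_unlock(&my_lock);
}

/* Runs in process context with interrupts enabled, like load_balance(). */
static void my_process_path(void)
{
	unsigned long flags;

	/*
	 * A plain spin_lock() here would leave a deadlock window: if the
	 * interrupt fires on this CPU while we hold my_lock, the handler
	 * spins on my_lock forever and we never run again to release it.
	 * spin_lock_irqsave() disables interrupts first and restores the
	 * previous interrupt state on unlock.
	 */
	spin_lock_irqsave(&my_lock, flags);
	my_count++;
	spin_unlock_irqrestore(&my_lock, flags);
}
```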
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched.c | 11 ++++++----- |
1 file changed, 6 insertions(+), 5 deletions(-)
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index b5b35013500..d327511d268 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2546,8 +2546,6 @@ static inline unsigned long minus_1_or_zero(unsigned long n)
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
- *
- * Called with this_rq unlocked.
  */
 static int load_balance(int this_cpu, struct rq *this_rq,
 			struct sched_domain *sd, enum idle_type idle)
@@ -2557,6 +2555,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	unsigned long imbalance;
 	struct rq *busiest;
 	cpumask_t cpus = CPU_MASK_ALL;
+	unsigned long flags;
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -2596,11 +2595,13 @@ redo:
 		 * still unbalanced. nr_moved simply stays zero, so it is
 		 * correctly treated as an imbalance.
 		 */
+		local_irq_save(flags);
 		double_rq_lock(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 				      minus_1_or_zero(busiest->nr_running),
 				      imbalance, sd, idle, &all_pinned);
 		double_rq_unlock(this_rq, busiest);
+		local_irq_restore(flags);
 
 		/* All tasks on this runqueue were pinned by CPU affinity */
 		if (unlikely(all_pinned)) {
@@ -2617,13 +2618,13 @@ redo:
 
 		if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
 
-			spin_lock(&busiest->lock);
+			spin_lock_irqsave(&busiest->lock, flags);
 
 			/* don't kick the migration_thread, if the curr
 			 * task on busiest cpu can't be moved to this_cpu
 			 */
 			if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
-				spin_unlock(&busiest->lock);
+				spin_unlock_irqrestore(&busiest->lock, flags);
 				all_pinned = 1;
 				goto out_one_pinned;
 			}
@@ -2633,7 +2634,7 @@ redo:
 			busiest->push_cpu = this_cpu;
 			active_balance = 1;
 		}
-		spin_unlock(&busiest->lock);
+		spin_unlock_irqrestore(&busiest->lock, flags);
 
 		if (active_balance)
 			wake_up_process(busiest->migration_thread);
```
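A note on the design choice in the middle hunk: around double_rq_lock() the patch disables interrupts once with local_irq_save() rather than converting each acquisition to an _irqsave variant, since double_rq_lock() takes the two runqueue locks internally with plain spin_lock() in a stable order. A hedged sketch of that shape, assuming the 2.6.19-era sched.c helpers and eliding the task-moving step:

```c
/*
 * Sketch only: mirrors the pattern the patch applies in
 * load_balance().  double_rq_lock()/double_rq_unlock() are the
 * existing sched.c helpers that lock two runqueues in a fixed
 * order to avoid ABBA deadlock.
 */
static void balance_pair(struct rq *this_rq, struct rq *busiest)
{
	unsigned long flags;

	local_irq_save(flags);             /* one irq-off section covers both locks */
	double_rq_lock(this_rq, busiest);

	/* ... move_tasks() between the two runqueues ... */

	double_rq_unlock(this_rq, busiest);
	local_irq_restore(flags);          /* back to the caller's irq state */
}
```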