author    Mike Galbraith <efault@gmx.de>          2008-05-29 11:11:41 +0200
committer Ingo Molnar <mingo@elte.hu>             2008-05-29 11:29:20 +0200
commit    b3137bc8e77962a8e3b4dfdc1bcfd38e437bd278 (patch)
tree      5131501b5575f933074cc89545ff997d277d1d57 /kernel
parent    a381759d6ad5c5dea5a981918e0b4493e9b66ac7 (diff)
sched: stop wake_affine from causing serious imbalance
Prevent short-running wakers of short-running threads from overloading a single
CPU via wakeup affinity, and wire up the disconnected AFFINE_WAKEUPS debug option.
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
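
For context on what the patch computes: the new "balanced" test permits an
affine wakeup only if, after discounting the waker's own weight for sync
wakeups, this CPU's load plus the woken task's weight stays within the
domain's imbalance percentage of the previous CPU's load. Below is a minimal
standalone sketch of that arithmetic in plain C; the parameter names and the
example weights are hypothetical stand-ins for the kernel's rq and
sched_entity fields, not the kernel code itself.

#include <stdio.h>

/*
 * Standalone sketch of the wake_affine "balanced" test added by this
 * patch. All inputs are stand-ins for the kernel values:
 *   tl          - load of the waking CPU (this_load)
 *   load        - load of the CPU the task last ran on (prev_cpu)
 *   weight      - p->se.load.weight of the woken task
 *   imbalance   - the domain's imbalance percentage (e.g. 125)
 *   sync        - sync wakeup: the waker is expected to sleep soon
 *   curr_weight - current->se.load.weight of the waker
 */
static int balanced(unsigned long tl, unsigned long load,
		    unsigned long weight, unsigned long imbalance,
		    int sync, unsigned long curr_weight)
{
	/*
	 * For a sync wakeup, subtract the waker's (maximum possible)
	 * contribution from this CPU's load before judging balance:
	 */
	if (sync)
		tl -= curr_weight;

	/*
	 * Allow the affine wakeup only if this CPU's load plus the
	 * woken task stays within imbalance% of the previous CPU's
	 * load (the 100* keeps everything in integer arithmetic):
	 */
	return 100 * (tl + weight) <= imbalance * load;
}

int main(void)
{
	/* Two nice-0 tasks (weight 1024 each) already on this CPU,
	 * prev_cpu idle: pulling a third task would overload us. */
	printf("%d\n", balanced(2048, 0, 1024, 125, 0, 1024));   /* 0 */

	/* Sync wakeup, waker about to sleep, prev_cpu loaded:
	 * the discounted load passes the test, so pull the task. */
	printf("%d\n", balanced(1024, 2048, 1024, 125, 1, 1024)); /* 1 */
	return 0;
}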
Diffstat (limited to 'kernel')
 kernel/sched_fair.c | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f0f25fc12d0..08ae848b71d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -996,16 +996,27 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	struct task_struct *curr = this_rq->curr;
 	unsigned long tl = this_load;
 	unsigned long tl_per_task;
+	int balanced;
 
-	if (!(this_sd->flags & SD_WAKE_AFFINE))
+	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
 		return 0;
 
 	/*
+	 * If sync wakeup then subtract the (maximum possible)
+	 * effect of the currently running task from the load
+	 * of the current CPU:
+	 */
+	if (sync)
+		tl -= current->se.load.weight;
+
+	balanced = 100*(tl + p->se.load.weight) <= imbalance*load;
+
+	/*
 	 * If the currently running task will sleep within
 	 * a reasonable amount of time then attract this newly
 	 * woken task:
 	 */
-	if (sync && curr->sched_class == &fair_sched_class) {
+	if (sync && balanced && curr->sched_class == &fair_sched_class) {
 		if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
 		    p->se.avg_overlap < sysctl_sched_migration_cost)
 			return 1;
@@ -1014,16 +1025,8 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	schedstat_inc(p, se.nr_wakeups_affine_attempts);
 	tl_per_task = cpu_avg_load_per_task(this_cpu);
 
-	/*
-	 * If sync wakeup then subtract the (maximum possible)
-	 * effect of the currently running task from the load
-	 * of the current CPU:
-	 */
-	if (sync)
-		tl -= current->se.load.weight;
-
 	if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) ||
-	    100*(tl + p->se.load.weight) <= imbalance*load) {
+			balanced) {
 		/*
 		 * This domain has SD_WAKE_AFFINE and
 		 * p is cache cold in this domain, and
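
Two design points worth noting. First, the sync-wakeup discount and the
percentage test are hoisted above the avg_overlap fast path, so even a
short-overlap sync wakeup must now pass the balance check before the task is
pulled; this is what stops a short-running waker from stacking short-running
wakees onto its own CPU. Second, the AFFINE_WAKEUPS sched_feat gate was
previously never consulted; with it wired into wake_affine(), the heuristic
can be switched off at runtime for debugging. On kernels of this era built
with CONFIG_SCHED_DEBUG that is typically done through the debugfs
sched_features file (e.g. echo NO_AFFINE_WAKEUPS > /sys/kernel/debug/sched_features),
though the exact path depends on the kernel configuration.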