| field | value | date |
|---|---|---|
| author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2008-06-27 13:41:27 +0200 |
| committer | Ingo Molnar <mingo@elte.hu> | 2008-06-27 14:31:40 +0200 |
| commit | bb3469ac9b50f14ad6eba129ca0ad4fd033097a0 | |
| tree | e5c6c6563778593f53fca642675b4c3ba5603ed2 /kernel/sched_fair.c | |
| parent | a8a51d5e59561aa5b4d66e19eca819b537783e8f | |
sched: hierarchical load vs affine wakeups
With hierarchical grouping we can't just compare a task's weight to the rq weight -
a task's weight only has meaning within its own group, so it needs to be scaled by
the group's hierarchical load before the comparison (see the illustrative sketch
after the sign-offs below).
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
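
As an aside from the patch itself, here is a minimal userspace sketch of the scaling this change introduces. The helper name `scaled_task_load()` and all numbers are illustrative assumptions, not kernel code; the arithmetic mirrors what `calc_delta_mine()` computes inside `task_h_load()`: the task's weight multiplied by the group's hierarchical (rq-level) load, divided by the group's internal load.

```c
#include <stdio.h>

/*
 * Illustrative only (hypothetical helper, not a kernel function):
 * scale a task's weight the way task_h_load() does via calc_delta_mine(),
 * i.e. task_weight * cfs_rq->h_load / cfs_rq->load.weight.
 */
static unsigned long scaled_task_load(unsigned long task_weight,
				      unsigned long group_h_load,
				      unsigned long group_load)
{
	return task_weight * group_h_load / group_load;
}

int main(void)
{
	/*
	 * Assumed example numbers: a task of weight 1024 sits in a group
	 * whose internal load is 2048 and whose hierarchical contribution
	 * to the rq is 512, so at the rq level it behaves like a
	 * 256-weight task.
	 */
	printf("h_load = %lu\n", scaled_task_load(1024, 512, 2048));
	return 0;
}
```

In this example, comparing the raw weight of 1024 against rq-wide load would overstate the task's impact by a factor of four, which is exactly the kind of error the patch removes from wake_affine().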
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- | kernel/sched_fair.c | 23 |
1 file changed, 21 insertions, 2 deletions
```diff
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7b8d664d6f2..865cb53a7cc 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1073,6 +1073,25 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 
 static const struct sched_class fair_sched_class;
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static unsigned long task_h_load(struct task_struct *p)
+{
+	unsigned long h_load = p->se.load.weight;
+	struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
+
+	update_h_load(task_cpu(p));
+
+	h_load = calc_delta_mine(h_load, cfs_rq->h_load, &cfs_rq->load);
+
+	return h_load;
+}
+#else
+static unsigned long task_h_load(struct task_struct *p)
+{
+	return p->se.load.weight;
+}
+#endif
+
 static int
 wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	    struct task_struct *p, int prev_cpu, int this_cpu, int sync,
@@ -1093,9 +1112,9 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	 * of the current CPU:
 	 */
 	if (sync)
-		tl -= current->se.load.weight;
+		tl -= task_h_load(current);
 
-	balanced = 100*(tl + p->se.load.weight) <= imbalance*load;
+	balanced = 100*(tl + task_h_load(p)) <= imbalance*load;
 
 	/*
 	 * If the currently running task will sleep within
```
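
For context on where the scaled value lands: the `balanced` test in wake_affine() reads `imbalance` as a percentage-style threshold, checking `100*(tl + task_h_load(p)) <= imbalance*load`. Below is a rough sketch of that check with assumed numbers; none of the values come from the patch, only the shape of the comparison does.

```c
#include <stdio.h>

int main(void)
{
	/* All values below are assumptions for illustration only. */
	unsigned long tl = 1024;        /* load on this (waking) CPU         */
	unsigned long load = 2048;      /* load on the task's previous CPU   */
	unsigned long p_h_load = 256;   /* task_h_load(p): weight after group
					 * scaling, as in the earlier sketch */
	unsigned long imbalance = 125;  /* allowed imbalance, in percent     */

	/* Same shape as the check in the second hunk above. */
	int balanced = 100 * (tl + p_h_load) <= imbalance * load;

	printf("balanced = %d\n", balanced);	/* 128000 <= 256000 -> 1 */
	return 0;
}
```

With group scheduling, using the raw `p->se.load.weight` here (1024 instead of 256 in this example) could flip the result and wrongly reject an affine wakeup, which is the behavior the patch corrects.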