sched: incremental effective_load()
Increase the accuracy of the effective_load values.

Not only consider the current increment (as per the attempted wakeup), but
also consider the delta between when we last adjusted the shares and the
current situation.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
Committed by: Ingo Molnar
Parent: 83378269a5
Commit: f1d239f732
@@ -427,6 +427,11 @@ struct cfs_rq {
 	 * this cpu's part of tg->shares
 	 */
 	unsigned long shares;
+
+	/*
+	 * load.weight at the time we set shares
+	 */
+	unsigned long rq_weight;
 #endif
 #endif
 };
@@ -1527,6 +1532,7 @@ __update_group_shares_cpu(struct task_group *tg, int cpu,
 	 * record the actual number of shares, not the boosted amount.
 	 */
 	tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
+	tg->cfs_rq[cpu]->rq_weight = rq_weight;
 
 	if (shares < MIN_SHARES)
 		shares = MIN_SHARES;
@@ -1074,10 +1074,22 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 static const struct sched_class fair_sched_class;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static unsigned long effective_load(struct task_group *tg, int cpu,
-		unsigned long wl, unsigned long wg)
+static long effective_load(struct task_group *tg, int cpu,
+		long wl, long wg)
 {
 	struct sched_entity *se = tg->se[cpu];
+	long more_w;
+
+	if (!tg->parent)
+		return wl;
+
+	/*
+	 * Instead of using this increment, also add the difference
+	 * between when the shares were last updated and now.
+	 */
+	more_w = se->my_q->load.weight - se->my_q->rq_weight;
+	wl += more_w;
+	wg += more_w;
 
 	for_each_sched_entity(se) {
 #define D(n) (likely(n) ? (n) : 1)
@@ -1086,7 +1098,7 @@ static unsigned long effective_load(struct task_group *tg, int cpu,
 
 		S = se->my_q->tg->shares;
 		s = se->my_q->shares;
-		rw = se->my_q->load.weight;
+		rw = se->my_q->rq_weight;
 
 		a = S*(rw + wl);
 		b = S*rw + s*wg;
Reference in New Issue
Block a user