sched: Make tg_shares_up() walk on-demand

Make tg_shares_up() use the active cgroup list; this means we cannot
do a strict bottom-up walk of the hierarchy, but assuming it's a very
wide tree with a small number of active groups it should be a win.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234937.754159484@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 9e3081ca61
parent 3d4b47b4b0
Author:     Peter Zijlstra
AuthorDate: 2010-11-15 15:47:02 -08:00
Commit:     Ingo Molnar

2 changed files with 58 additions and 67 deletions
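The snippet below is a minimal, self-contained sketch (not part of the patch) of the walk
order that the new update_shares() in the diff uses: instead of a strict bottom-up pass over
every group, it starts from each *active* leaf cfs_rq and follows parent pointers to the root.
struct group, the example tree, and the active_leaves array are made-up stand-ins for
task_group and the per-rq leaf_cfs_rq_list, purely for illustration.

#include <stdio.h>
#include <stddef.h>

struct group {
        const char *name;
        struct group *parent;
};

/* a wide tree: root with three children, only two subtrees active */
static struct group root   = { "root",   NULL  };
static struct group a      = { "a",      &root };
static struct group b      = { "b",      &root };
static struct group c      = { "c",      &root };
static struct group b_leaf = { "b/leaf", &b    };

/* stand-in for the per-cpu list of cfs_rqs that currently carry load */
static struct group *active_leaves[] = { &a, &b_leaf };

int main(void)
{
        (void)c;        /* "c" is never visited: nothing below it is active */

        /* mirrors the do { ... tg = tg->parent; } while (tg) loop below */
        for (size_t i = 0; i < sizeof(active_leaves) / sizeof(active_leaves[0]); i++) {
                for (struct group *g = active_leaves[i]; g; g = g->parent)
                        printf("update %s\n", g->name);
        }
        /* "root" is printed twice, once per active leaf beneath it */
        return 0;
}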

@@ -2004,6 +2004,60 @@ out:
}

#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * update tg->load_weight by folding this cpu's load_avg
 */
static int tg_shares_up(struct task_group *tg, int cpu)
{
        struct cfs_rq *cfs_rq;
        unsigned long flags;
        struct rq *rq;
        long load_avg;

        if (!tg->se[cpu])
                return 0;

        rq = cpu_rq(cpu);
        cfs_rq = tg->cfs_rq[cpu];

        raw_spin_lock_irqsave(&rq->lock, flags);

        update_rq_clock(rq);
        update_cfs_load(cfs_rq, 1);

        load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
        load_avg -= cfs_rq->load_contribution;

        atomic_add(load_avg, &tg->load_weight);
        cfs_rq->load_contribution += load_avg;

        /*
         * We need to update shares after updating tg->load_weight in
         * order to adjust the weight of groups with long running tasks.
         */
        update_cfs_shares(cfs_rq);

        raw_spin_unlock_irqrestore(&rq->lock, flags);

        return 0;
}
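
/*
 * Worked example with made-up numbers (not from the patch): if on this cpu
 * cfs_rq->load_avg = 3072 and cfs_rq->load_period = 2, the folded average is
 * 3072 / (2 + 1) = 1024.  With a previously published load_contribution of
 * 896, only the delta 1024 - 896 = 128 is added to the shared tg->load_weight,
 * and load_contribution is bumped to 1024, so each cpu accounts only its own
 * change.
 */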

static void update_shares(int cpu)
{
        struct cfs_rq *cfs_rq;
        struct rq *rq = cpu_rq(cpu);

        rcu_read_lock();
        for_each_leaf_cfs_rq(rq, cfs_rq) {
                struct task_group *tg = cfs_rq->tg;

                do {
                        tg_shares_up(tg, cpu);
                        tg = tg->parent;
                } while (tg);
        }
        rcu_read_unlock();
}
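
/*
 * Note on the walk above (editorial comment, not part of the patch): each
 * active leaf cfs_rq is walked up to the root, so an ancestor shared by
 * several active leaves is updated once per leaf.  That redundancy is the
 * price for never touching groups with no active leaves at all, which is the
 * win the changelog assumes for a wide tree with few active groups.
 */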

static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                  unsigned long max_load_move,
@@ -2051,6 +2105,10 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
        return max_load_move - rem_load_move;
}
#else
static inline void update_shares(int cpu)
{
}

static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                  unsigned long max_load_move,