sched: Make tg_shares_up() walk on-demand
Make tg_shares_up() use the active cgroup list. This means we cannot do a strict bottom-up walk of the hierarchy, but assuming it is a very wide tree with a small number of active groups it should be a win.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234937.754159484@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
committed by Ingo Molnar
parent 3d4b47b4b0
commit 9e3081ca61
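For illustration, a minimal userspace C sketch of the walk described above (and added in the hunk at @@ -2004 below): instead of visiting every group in the hierarchy via walk_tg_tree(), update_shares() starts from each active (leaf) cfs_rq on the cpu and follows parent pointers up to the root. The struct group type, the shares_up() stub and the example hierarchy are hypothetical stand-ins for the kernel's struct task_group / struct cfs_rq and tg_shares_up(); this is not kernel code.

#include <stdio.h>

/*
 * Hypothetical stand-in for struct task_group: the kernel reaches the
 * group via cfs_rq->tg and climbs tg->parent; a plain parent pointer
 * models that chain here.
 */
struct group {
        const char *name;
        struct group *parent;
};

/* stand-in for tg_shares_up(): one load/shares update per visited group */
static void shares_up(struct group *g, int cpu)
{
        printf("cpu%d: update shares of %s\n", cpu, g->name);
}

/* walk only the chains rooted at the active leaf groups on this cpu */
static void update_shares_sketch(struct group **active, int nr_active, int cpu)
{
        for (int i = 0; i < nr_active; i++)
                for (struct group *g = active[i]; g; g = g->parent)
                        shares_up(g, cpu);
}

int main(void)
{
        struct group root = { "root", NULL  };
        struct group a    = { "a",    &root };
        struct group ab   = { "a/b",  &a    };
        struct group c    = { "c",    &root };  /* idle group: never visited */

        struct group *active[] = { &ab };       /* only a/b has runnable tasks */

        (void)c;
        update_shares_sketch(active, 1, 0);
        return 0;
}

Note that ancestors shared by several active leaves are visited once per leaf rather than once overall; the changelog's bet is that with a very wide tree and only a few active groups this still beats a strict bottom-up walk of the whole hierarchy.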
@@ -279,13 +279,6 @@ static DEFINE_SPINLOCK(task_group_lock);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-#ifdef CONFIG_SMP
-static int root_task_group_empty(void)
-{
-        return list_empty(&root_task_group.children);
-}
-#endif
-
 # define INIT_TASK_GROUP_LOAD        NICE_0_LOAD
 
 /*
@@ -1546,48 +1539,6 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-static void update_cfs_load(struct cfs_rq *cfs_rq, int lb);
-static void update_cfs_shares(struct cfs_rq *cfs_rq);
-
-/*
- * update tg->load_weight by folding this cpu's load_avg
- */
-static int tg_shares_up(struct task_group *tg, void *data)
-{
-        long load_avg;
-        struct cfs_rq *cfs_rq;
-        unsigned long flags;
-        int cpu = (long)data;
-        struct rq *rq;
-
-        if (!tg->se[cpu])
-                return 0;
-
-        rq = cpu_rq(cpu);
-        cfs_rq = tg->cfs_rq[cpu];
-
-        raw_spin_lock_irqsave(&rq->lock, flags);
-
-        update_rq_clock(rq);
-        update_cfs_load(cfs_rq, 1);
-
-        load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
-        load_avg -= cfs_rq->load_contribution;
-
-        atomic_add(load_avg, &tg->load_weight);
-        cfs_rq->load_contribution += load_avg;
-
-        /*
-         * We need to update shares after updating tg->load_weight in
-         * order to adjust the weight of groups with long running tasks.
-         */
-        update_cfs_shares(cfs_rq);
-
-        raw_spin_unlock_irqrestore(&rq->lock, flags);
-
-        return 0;
-}
-
 /*
  * Compute the cpu's hierarchical load factor for each task group.
  * This needs to be done in a top-down fashion because the load of a child
@@ -1611,29 +1562,11 @@ static int tg_load_down(struct task_group *tg, void *data)
         return 0;
 }
 
-static void update_shares(long cpu)
-{
-        if (root_task_group_empty())
-                return;
-
-        /*
-         * XXX: replace with an on-demand list
-         */
-
-        walk_tg_tree(tg_nop, tg_shares_up, (void *)cpu);
-}
-
 static void update_h_load(long cpu)
 {
         walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 }
 
-#else
-
-static inline void update_shares(int cpu)
-{
-}
-
 #endif
 
 #ifdef CONFIG_PREEMPT
@@ -2004,6 +2004,60 @@ out:
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+/*
+ * update tg->load_weight by folding this cpu's load_avg
+ */
+static int tg_shares_up(struct task_group *tg, int cpu)
+{
+        struct cfs_rq *cfs_rq;
+        unsigned long flags;
+        struct rq *rq;
+        long load_avg;
+
+        if (!tg->se[cpu])
+                return 0;
+
+        rq = cpu_rq(cpu);
+        cfs_rq = tg->cfs_rq[cpu];
+
+        raw_spin_lock_irqsave(&rq->lock, flags);
+
+        update_rq_clock(rq);
+        update_cfs_load(cfs_rq, 1);
+
+        load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
+        load_avg -= cfs_rq->load_contribution;
+        atomic_add(load_avg, &tg->load_weight);
+        cfs_rq->load_contribution += load_avg;
+
+        /*
+         * We need to update shares after updating tg->load_weight in
+         * order to adjust the weight of groups with long running tasks.
+         */
+        update_cfs_shares(cfs_rq);
+
+        raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+        return 0;
+}
+
+static void update_shares(int cpu)
+{
+        struct cfs_rq *cfs_rq;
+        struct rq *rq = cpu_rq(cpu);
+
+        rcu_read_lock();
+        for_each_leaf_cfs_rq(rq, cfs_rq) {
+                struct task_group *tg = cfs_rq->tg;
+
+                do {
+                        tg_shares_up(tg, cpu);
+                        tg = tg->parent;
+                } while (tg);
+        }
+        rcu_read_unlock();
+}
+
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                   unsigned long max_load_move,
@@ -2051,6 +2105,10 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
         return max_load_move - rem_load_move;
 }
 #else
+static inline void update_shares(int cpu)
+{
+}
+
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                   unsigned long max_load_move,