sched: Allow update_cfs_load() to update global load
Refactor the global load updates from update_shares_cpu() so that
update_cfs_load() can update global load when it is more than ~10% out of
sync. The new global_update parameter allows us to force an update,
regardless of the error factor, so that we can synchronize with
update_shares().

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234938.377473595@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
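For orientation before the diff below, here is a minimal standalone sketch
(not kernel code) of the update policy being introduced. The names
update_contribution(), global_load and contributed are illustrative
stand-ins for the kernel's update_cfs_rq_load_contribution(),
tg->load_weight and cfs_rq->load_contribution; the threshold in the actual
patch is 1/8th of the previous contribution (~12.5%), which the changelog
rounds to "~10%".

	/* Standalone illustration of the fold-on-drift-or-force policy. */
	#include <stdio.h>
	#include <stdlib.h>

	static long global_load;	/* stands in for tg->load_weight        */
	static long contributed;	/* stands in for load_contribution      */

	static void update_contribution(long local_avg, int global_update)
	{
		long delta = local_avg - contributed;

		/* Fold only when forced, or when drift exceeds ~1/8th. */
		if (global_update || labs(delta) > contributed / 8) {
			global_load += delta;	/* atomic_add() in the real code */
			contributed += delta;
		}
	}

	int main(void)
	{
		contributed = 1000;
		global_load = 1000;

		update_contribution(1050, 0);	/* 5% drift: below threshold, skipped */
		update_contribution(1050, 1);	/* forced: folded into global_load    */
		printf("global_load=%ld contributed=%ld\n", global_load, contributed);
		return 0;
	}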
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -539,7 +539,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	return calc_delta_fair(sched_slice(cfs_rq, se), se);
 }
 
-static void update_cfs_load(struct cfs_rq *cfs_rq);
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
 static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta);
 
 /*
@@ -565,7 +565,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	cfs_rq->load_unacc_exec_time += delta_exec;
 	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
-		update_cfs_load(cfs_rq);
+		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq, 0);
 	}
 #endif
@@ -704,7 +704,22 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
-static void update_cfs_load(struct cfs_rq *cfs_rq)
+static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
+					    int global_update)
+{
+	struct task_group *tg = cfs_rq->tg;
+	long load_avg;
+
+	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
+	load_avg -= cfs_rq->load_contribution;
+
+	if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
+		atomic_add(load_avg, &tg->load_weight);
+		cfs_rq->load_contribution += load_avg;
+	}
+}
+
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
 	u64 period = sysctl_sched_shares_window;
 	u64 now, delta;
@@ -731,6 +746,11 @@ static void update_cfs_load(struct cfs_rq *cfs_rq)
 		cfs_rq->load_avg += delta * load;
 	}
 
+	/* consider updating load contribution on each fold or truncate */
+	if (global_update || cfs_rq->load_period > period
+	    || !cfs_rq->load_period)
+		update_cfs_rq_load_contribution(cfs_rq, global_update);
+
 	while (cfs_rq->load_period > period) {
 		/*
 		 * Inline assembly required to prevent the compiler
@@ -790,7 +810,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 	reweight_entity(cfs_rq_of(se), se, shares);
 }
 #else /* CONFIG_FAIR_GROUP_SCHED */
-static inline void update_cfs_load(struct cfs_rq *cfs_rq)
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
 }
 
@@ -920,7 +940,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
-	update_cfs_load(cfs_rq);
+	update_cfs_load(cfs_rq, 0);
 	update_cfs_shares(cfs_rq, se->load.weight);
 	account_entity_enqueue(cfs_rq, se);
 
@@ -981,7 +1001,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
 	se->on_rq = 0;
-	update_cfs_load(cfs_rq);
+	update_cfs_load(cfs_rq, 0);
 	account_entity_dequeue(cfs_rq, se);
 	update_min_vruntime(cfs_rq);
 	update_cfs_shares(cfs_rq, 0);
@@ -1216,7 +1236,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-		update_cfs_load(cfs_rq);
+		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq, 0);
 	}
 
@@ -1246,7 +1266,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-		update_cfs_load(cfs_rq);
+		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq, 0);
 	}
 
@@ -2052,7 +2072,6 @@ static int update_shares_cpu(struct task_group *tg, int cpu)
 	struct cfs_rq *cfs_rq;
 	unsigned long flags;
 	struct rq *rq;
-	long load_avg;
 
 	if (!tg->se[cpu])
 		return 0;
@@ -2063,12 +2082,7 @@ static int update_shares_cpu(struct task_group *tg, int cpu)
 	raw_spin_lock_irqsave(&rq->lock, flags);
 
 	update_rq_clock(rq);
-	update_cfs_load(cfs_rq);
-
-	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
-	load_avg -= cfs_rq->load_contribution;
-	atomic_add(load_avg, &tg->load_weight);
-	cfs_rq->load_contribution += load_avg;
+	update_cfs_load(cfs_rq, 1);
 
 	/*
 	 * We need to update shares after updating tg->load_weight in