sched: Optimize cgroup vs wakeup a bit
We don't need to call update_shares() for each domain we iterate; doing it once for the largest one is enough. However, we should call it before wake_affine() as well, so that it can use up-to-date values too.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
committed by Ingo Molnar
parent 7c423e9885
commit 3b64089422
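For orientation before the per-file hunks, here is a condensed sketch of the shares handling in select_task_rq_fair() as it looks after this patch. Everything unrelated to update_shares() is elided, so the fragment is not compilable on its own; all identifiers (tmp, sd, shares, want_affine, update_shares(), wake_affine(), sched_feat()) are taken from the diff below.

        for_each_domain(cpu, tmp) {
                /* ... candidate filtering elided ... */

                if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
                    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {

                        /* refresh once, right before wake_affine() reads the data */
                        if (sched_feat(LB_SHARES_UPDATE)) {
                                update_shares(tmp);
                                shares = tmp;
                        }

                        if (wake_affine(tmp, p, sync)) {
                                new_cpu = cpu;
                                goto out;
                        }
                        /* ... */
                }

                if (tmp->flags & sd_flag)
                        sd = tmp;       /* only the largest matching domain is kept */
        }

        /* one update for the largest domain, unless the affine path already covered it */
        if (sd && sd != shares && sched_feat(LB_SHARES_UPDATE))
                update_shares(sd);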
kernel/sched.c
@@ -376,13 +376,6 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 
 #else
 
-#ifdef CONFIG_SMP
-static int root_task_group_empty(void)
-{
-        return 1;
-}
-#endif
-
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
 static inline struct task_group *task_group(struct task_struct *p)
 {
kernel/sched_fair.c
@@ -1348,7 +1348,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
  */
 static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags)
 {
-        struct sched_domain *tmp, *sd = NULL;
+        struct sched_domain *tmp, *shares = NULL, *sd = NULL;
         int cpu = smp_processor_id();
         int prev_cpu = task_cpu(p);
         int new_cpu = cpu;
@@ -1387,22 +1387,14 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags)
                                 break;
                 }
 
-                switch (sd_flag) {
-                case SD_BALANCE_WAKE:
-                        if (!sched_feat(LB_WAKEUP_UPDATE))
-                                break;
-                case SD_BALANCE_FORK:
-                case SD_BALANCE_EXEC:
-                        if (root_task_group_empty())
-                                break;
-                        update_shares(tmp);
-                default:
-                        break;
-                }
-
                 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
                     cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
 
+                        if (sched_feat(LB_SHARES_UPDATE)) {
+                                update_shares(tmp);
+                                shares = tmp;
+                        }
+
                         if (wake_affine(tmp, p, sync)) {
                                 new_cpu = cpu;
                                 goto out;
@@ -1417,6 +1409,9 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags)
                         sd = tmp;
         }
 
+        if (sd && sd != shares && sched_feat(LB_SHARES_UPDATE))
+                update_shares(sd);
+
         while (sd) {
                 struct sched_group *group;
                 int weight;
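The pattern above is easier to see outside the kernel: the walk no longer refreshes shares at every level; it refreshes once when the affinity check is about to use the data, remembers which level that was, and refreshes at most one more level, the largest one kept for balancing. Below is a minimal userspace model of that idea; struct domain, refresh_shares() and the flag fields are invented purely for illustration and are not kernel code.

        #include <stdbool.h>
        #include <stdio.h>

        /* Toy stand-in for one level of a sched_domain hierarchy. */
        struct domain {
                const char *name;
                bool wake_affine;       /* would the affinity check run here?      */
                bool balance;           /* does this level match the balance flag? */
                struct domain *parent;
        };

        static int refresh_count;

        /* Stand-in for the expensive update_shares() walk. */
        static void refresh_shares(struct domain *d)
        {
                refresh_count++;
                printf("refresh shares for %s\n", d->name);
        }

        int main(void)
        {
                struct domain node = { "NODE", false, true, NULL  };
                struct domain mc   = { "MC",   true,  true, &node };
                struct domain smt  = { "SMT",  false, true, &mc   };

                struct domain *refreshed = NULL;        /* plays the role of 'shares' */
                struct domain *largest = NULL;          /* plays the role of 'sd'     */

                /* Walk from the smallest level to the largest, like for_each_domain(). */
                for (struct domain *d = &smt; d; d = d->parent) {
                        if (d->wake_affine) {
                                /* Refresh right before the affinity decision needs fresh data. */
                                refresh_shares(d);
                                refreshed = d;
                        }
                        if (d->balance)
                                largest = d;    /* only the last (largest) match is kept */
                }

                /* One more refresh for the largest level, unless it was already done. */
                if (largest && largest != refreshed)
                        refresh_shares(largest);

                printf("total refreshes: %d (refreshing per level would have done 3)\n",
                       refresh_count);
                return 0;
        }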
kernel/sched_features.h
@@ -107,7 +107,7 @@ SCHED_FEAT(ARCH_POWER, 0)
 SCHED_FEAT(HRTICK, 0)
 SCHED_FEAT(DOUBLE_TICK, 0)
 SCHED_FEAT(LB_BIAS, 1)
-SCHED_FEAT(LB_WAKEUP_UPDATE, 1)
+SCHED_FEAT(LB_SHARES_UPDATE, 1)
 SCHED_FEAT(ASYM_EFF_LOAD, 1)
 
 /*
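The LB_WAKEUP_UPDATE feature goes away together with the switch statement that tested it, and the new LB_SHARES_UPDATE knob guards both update_shares() call sites added above. For readers unfamiliar with how these SCHED_FEAT() lines become runtime-checkable flags: the scheduler includes sched_features.h twice with different SCHED_FEAT() definitions, once to build an enum of feature indices and once to build the default bitmask, and sched_feat(x) is then a single bit test. A simplified, self-contained model of that x-macro scheme follows; FEATURE_LIST, MAKE_ENUM, MAKE_BIT and feat() are names made up for this example.

        #include <stdio.h>

        /* Inlined feature list; the kernel keeps this in sched_features.h instead. */
        #define FEATURE_LIST(F)         \
                F(LB_BIAS, 1)           \
                F(LB_SHARES_UPDATE, 1)  \
                F(ASYM_EFF_LOAD, 1)

        /* First expansion: one enum constant (bit index) per feature. */
        #define MAKE_ENUM(name, enabled)        __FEAT_##name,
        enum { FEATURE_LIST(MAKE_ENUM) };

        /* Second expansion: default bitmask assembled from the 'enabled' values. */
        #define MAKE_BIT(name, enabled) ((enabled) << __FEAT_##name) |
        static unsigned int features = FEATURE_LIST(MAKE_BIT) 0;

        /* The check used throughout the scheduler boils down to one bit test. */
        #define feat(name)      (features & (1u << __FEAT_##name))

        int main(void)
        {
                printf("LB_SHARES_UPDATE: %s\n", feat(LB_SHARES_UPDATE) ? "on" : "off");
                features &= ~(1u << __FEAT_LB_SHARES_UPDATE);   /* switch it off */
                printf("LB_SHARES_UPDATE: %s\n", feat(LB_SHARES_UPDATE) ? "on" : "off");
                return 0;
        }

With CONFIG_SCHED_DEBUG, the real flags can likewise be flipped at runtime, e.g. by writing NO_LB_SHARES_UPDATE to /sys/kernel/debug/sched_features.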