sched: Introduce SCHED_POWER_SCALE to scale cpu_power calculations

SCHED_LOAD_SCALE is used to increase nice resolution and to
scale cpu_power calculations in the scheduler. This patch
introduces SCHED_POWER_SCALE and converts all uses of
SCHED_LOAD_SCALE for scaling cpu_power to use SCHED_POWER_SCALE
instead.

This is a preparatory patch for increasing the resolution of
SCHED_LOAD_SCALE; cpu_power calculations do not need the increased
resolution, so they are switched to the separate SCHED_POWER_SCALE.

Signed-off-by: Nikhil Rao <ncrao@google.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Nikunj A. Dadhania <nikunj@linux.vnet.ibm.com>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Stephan Bärwolf <stephan.baerwolf@tu-ilmenau.de>
Cc: Mike Galbraith <efault@gmx.de>
Link: http://lkml.kernel.org/r/1305738580-9924-3-git-send-email-ncrao@google.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Author:    Nikhil Rao
Date:      2011-05-18 10:09:39 -07:00
Committer: Ingo Molnar
Commit:    1399fa7807 (parent f05998d4b8)

3 changed files with 37 additions and 32 deletions
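
Only the scaling-related hunks from the fair/load-balancing code are
reproduced below; the hunks that add the new constants themselves are not
shown. As a rough sketch (assuming the new defines simply mirror the existing
SCHED_LOAD_SHIFT/SCHED_LOAD_SCALE pair, whose shift is 10), the additions
would look roughly like:

	/* Illustrative sketch only, not the verbatim hunk from this commit. */
	#define SCHED_LOAD_SHIFT	10
	#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)	/* existing: load/nice resolution */

	#define SCHED_POWER_SHIFT	10
	#define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)	/* new: cpu_power scaling */

Both scales are 1024 at this point; only SCHED_LOAD_SCALE is meant to grow in
the follow-up resolution patch, while cpu_power keeps its 1024-based fixed point.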

@@ -1584,7 +1584,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 }
 /* Adjust by relative CPU power of the group */
-avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
+avg_load = (avg_load * SCHED_POWER_SCALE) / group->cpu_power;
 if (local_group) {
 this_load = avg_load;
@@ -1722,7 +1722,7 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 nr_running += cpu_rq(i)->cfs.nr_running;
 }
-capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
+capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
 if (tmp->flags & SD_POWERSAVINGS_BALANCE)
 nr_running /= 2;
@@ -2570,7 +2570,7 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
 unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
 {
-return SCHED_LOAD_SCALE;
+return SCHED_POWER_SCALE;
 }
 unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
@@ -2607,10 +2607,10 @@ unsigned long scale_rt_power(int cpu)
 available = total - rq->rt_avg;
 }
-if (unlikely((s64)total < SCHED_LOAD_SCALE))
-total = SCHED_LOAD_SCALE;
+if (unlikely((s64)total < SCHED_POWER_SCALE))
+total = SCHED_POWER_SCALE;
-total >>= SCHED_LOAD_SHIFT;
+total >>= SCHED_POWER_SHIFT;
 return div_u64(available, total);
 }
@@ -2618,7 +2618,7 @@ unsigned long scale_rt_power(int cpu)
 static void update_cpu_power(struct sched_domain *sd, int cpu)
 {
 unsigned long weight = sd->span_weight;
-unsigned long power = SCHED_LOAD_SCALE;
+unsigned long power = SCHED_POWER_SCALE;
 struct sched_group *sdg = sd->groups;
 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
@@ -2627,7 +2627,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
 else
 power *= default_scale_smt_power(sd, cpu);
-power >>= SCHED_LOAD_SHIFT;
+power >>= SCHED_POWER_SHIFT;
 }
 sdg->cpu_power_orig = power;
@@ -2637,10 +2637,10 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
 else
 power *= default_scale_freq_power(sd, cpu);
-power >>= SCHED_LOAD_SHIFT;
+power >>= SCHED_POWER_SHIFT;
 power *= scale_rt_power(cpu);
-power >>= SCHED_LOAD_SHIFT;
+power >>= SCHED_POWER_SHIFT;
 if (!power)
 power = 1;
@@ -2682,7 +2682,7 @@ static inline int
 fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
 {
 /*
- * Only siblings can have significantly less than SCHED_LOAD_SCALE
+ * Only siblings can have significantly less than SCHED_POWER_SCALE
 */
 if (!(sd->flags & SD_SHARE_CPUPOWER))
 return 0;
@@ -2770,7 +2770,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 }
 /* Adjust by relative CPU power of the group */
-sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
+sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->cpu_power;
 /*
 * Consider the group unbalanced when the imbalance is larger
@@ -2787,7 +2787,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
 sgs->group_imb = 1;
-sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
+sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power,
+SCHED_POWER_SCALE);
 if (!sgs->group_capacity)
 sgs->group_capacity = fix_small_capacity(sd, group);
 sgs->group_weight = group->group_weight;
@@ -2961,7 +2962,7 @@ static int check_asym_packing(struct sched_domain *sd,
 return 0;
 *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->cpu_power,
-SCHED_LOAD_SCALE);
+SCHED_POWER_SCALE);
 return 1;
 }
@@ -2990,7 +2991,7 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 cpu_avg_load_per_task(this_cpu);
 scaled_busy_load_per_task = sds->busiest_load_per_task
-* SCHED_LOAD_SCALE;
+* SCHED_POWER_SCALE;
 scaled_busy_load_per_task /= sds->busiest->cpu_power;
 if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
@@ -3009,10 +3010,10 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 min(sds->busiest_load_per_task, sds->max_load);
 pwr_now += sds->this->cpu_power *
 min(sds->this_load_per_task, sds->this_load);
-pwr_now /= SCHED_LOAD_SCALE;
+pwr_now /= SCHED_POWER_SCALE;
 /* Amount of load we'd subtract */
-tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
+tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
 sds->busiest->cpu_power;
 if (sds->max_load > tmp)
 pwr_move += sds->busiest->cpu_power *
@@ -3020,15 +3021,15 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 /* Amount of load we'd add */
 if (sds->max_load * sds->busiest->cpu_power <
-sds->busiest_load_per_task * SCHED_LOAD_SCALE)
+sds->busiest_load_per_task * SCHED_POWER_SCALE)
 tmp = (sds->max_load * sds->busiest->cpu_power) /
 sds->this->cpu_power;
 else
-tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
+tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
 sds->this->cpu_power;
 pwr_move += sds->this->cpu_power *
 min(sds->this_load_per_task, sds->this_load + tmp);
-pwr_move /= SCHED_LOAD_SCALE;
+pwr_move /= SCHED_POWER_SCALE;
 /* Move if we gain throughput */
 if (pwr_move > pwr_now)
@@ -3070,7 +3071,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 load_above_capacity = (sds->busiest_nr_running -
 sds->busiest_group_capacity);
-load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_LOAD_SCALE);
+load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
 load_above_capacity /= sds->busiest->cpu_power;
 }
@@ -3090,7 +3091,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 /* How much load to actually move to equalise the imbalance */
 *imbalance = min(max_pull * sds->busiest->cpu_power,
 (sds->avg_load - sds->this_load) * sds->this->cpu_power)
-/ SCHED_LOAD_SCALE;
+/ SCHED_POWER_SCALE;
 /*
 * if *imbalance is less than the average load per runnable task
@@ -3159,7 +3160,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 if (!sds.busiest || sds.busiest_nr_running == 0)
 goto out_balanced;
-sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
+sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
 /*
 * If the busiest group is imbalanced the below checks don't
@@ -3238,7 +3239,8 @@ find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
 for_each_cpu(i, sched_group_cpus(group)) {
 unsigned long power = power_of(i);
-unsigned long capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
+unsigned long capacity = DIV_ROUND_CLOSEST(power,
+SCHED_POWER_SCALE);
 unsigned long wl;
 if (!capacity)
@@ -3263,7 +3265,7 @@ find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
 * the load can be moved away from the cpu that is potentially
 * running at a lower capacity.
 */
-wl = (wl * SCHED_LOAD_SCALE) / power;
+wl = (wl * SCHED_POWER_SCALE) / power;
 if (wl > max_load) {
 max_load = wl;