Merge branch 'linus' into sched/urgent
@@ -1013,16 +1013,33 @@ static void yield_task_fair(struct rq *rq)
  * search starts with cpus closest then further out as needed,
  * so we always favor a closer, idle cpu.
  * Domains may include CPUs that are not usable for migration,
- * hence we need to mask them out (cpu_active_map)
+ * hence we need to mask them out (cpu_active_mask)
  *
  * Returns the CPU we should wake onto.
  */
 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
 static int wake_idle(int cpu, struct task_struct *p)
 {
-	cpumask_t tmp;
 	struct sched_domain *sd;
 	int i;
+	unsigned int chosen_wakeup_cpu;
+	int this_cpu;
 
+	/*
+	 * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu
+	 * are idle and this is not a kernel thread and this task's affinity
+	 * allows it to be moved to preferred cpu, then just move!
+	 */
+
+	this_cpu = smp_processor_id();
+	chosen_wakeup_cpu =
+		cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu;
+
+	if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP &&
+		idle_cpu(cpu) && idle_cpu(this_cpu) &&
+		p->mm && !(p->flags & PF_KTHREAD) &&
+		cpu_isset(chosen_wakeup_cpu, p->cpus_allowed))
+		return chosen_wakeup_cpu;
+
 	/*
 	 * If it is idle, then it is the best cpu to run this task.
@@ -1040,10 +1057,9 @@ static int wake_idle(int cpu, struct task_struct *p)
 		if ((sd->flags & SD_WAKE_IDLE)
 		    || ((sd->flags & SD_WAKE_IDLE_FAR)
 			&& !task_hot(p, task_rq(p)->clock, sd))) {
-			cpus_and(tmp, sd->span, p->cpus_allowed);
-			cpus_and(tmp, tmp, cpu_active_map);
-			for_each_cpu_mask_nr(i, tmp) {
-				if (idle_cpu(i)) {
+			for_each_cpu_and(i, sched_domain_span(sd),
+					 &p->cpus_allowed) {
+				if (cpu_active(i) && idle_cpu(i)) {
 					if (i != task_cpu(p)) {
 						schedstat_inc(p,
 						       se.nr_wakeups_idle);
@@ -1236,13 +1252,13 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	 * this_cpu and prev_cpu are present in:
 	 */
 	for_each_domain(this_cpu, sd) {
-		if (cpu_isset(prev_cpu, sd->span)) {
+		if (cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) {
 			this_sd = sd;
 			break;
 		}
 	}
 
-	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
+	if (unlikely(!cpumask_test_cpu(this_cpu, &p->cpus_allowed)))
 		goto out;
 
 	/*
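
As a rough illustration of the pattern changed in the wake_idle() hunk above: the old code materialised a temporary cpumask (cpus_and into tmp, then for_each_cpu_mask_nr over it), while the updated code iterates with for_each_cpu_and and tests cpu_active()/idle_cpu() inline, so no on-stack cpumask_t is needed. The small userspace C sketch below mirrors only that shape with made-up 64-bit masks (domain_span, allowed, active) and a stand-in is_idle() predicate; it is not the kernel cpumask API.

#include <stdint.h>
#include <stdio.h>

#define MAX_CPUS 64

/* Stand-in predicate for the example: pretend even-numbered CPUs are idle. */
static int is_idle(int cpu)
{
	return cpu % 2 == 0;
}

int main(void)
{
	uint64_t domain_span = 0xffULL;	/* CPUs 0-7 are in the domain */
	uint64_t allowed     = 0x3cULL;	/* task may run on CPUs 2-5   */
	uint64_t active      = 0xfdULL;	/* CPU 1 is offline           */
	uint64_t tmp;
	int i;

	/* Old shape: build the intersection in a temporary mask, then walk it. */
	tmp = domain_span & allowed & active;
	for (i = 0; i < MAX_CPUS; i++)
		if (((tmp >> i) & 1) && is_idle(i))
			printf("old shape: CPU %d is a candidate\n", i);

	/* New shape: walk the intersection of two masks directly and test the
	 * remaining conditions inline, so no temporary is needed (on the
	 * kernel side this avoids a large on-stack cpumask_t). */
	for (i = 0; i < MAX_CPUS; i++)
		if ((((domain_span & allowed) >> i) & 1) &&
		    ((active >> i) & 1) && is_idle(i))
			printf("new shape: CPU %d is a candidate\n", i);

	return 0;
}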