Merge branch 'perf/urgent' into perf/core
Merge reason: queue up dependent patch, update to -rc4

Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched.c | 307
@@ -2002,39 +2002,6 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
p->sched_class->prio_changed(rq, p, oldprio, running);
}

/**
* kthread_bind - bind a just-created kthread to a cpu.
* @p: thread created by kthread_create().
* @cpu: cpu (might not be online, must be possible) for @k to run on.
*
* Description: This function is equivalent to set_cpus_allowed(),
* except that @cpu doesn't need to be online, and the thread must be
* stopped (i.e., just returned from kthread_create()).
*
* Function lives here instead of kthread.c because it messes with
* scheduler internals which require locking.
*/
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long flags;

/* Must have done schedule() in kthread() before we set_task_cpu */
if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
WARN_ON(1);
return;
}

raw_spin_lock_irqsave(&rq->lock, flags);
update_rq_clock(rq);
set_task_cpu(p, cpu);
p->cpus_allowed = cpumask_of_cpu(cpu);
p->rt.nr_cpus_allowed = 1;
p->flags |= PF_THREAD_BOUND;
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
EXPORT_SYMBOL(kthread_bind);

#ifdef CONFIG_SMP
/*
* Is this task likely cache-hot:
@@ -2044,6 +2011,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
{
s64 delta;

if (p->sched_class != &fair_sched_class)
return 0;

/*
* Buddy candidates are cache hot:
*/
@@ -2052,9 +2022,6 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
&p->se == cfs_rq_of(&p->se)->last))
return 1;

if (p->sched_class != &fair_sched_class)
return 0;

if (sysctl_sched_migration_cost == -1)
return 1;
if (sysctl_sched_migration_cost == 0)
@@ -2065,22 +2032,23 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
return delta < (s64)sysctl_sched_migration_cost;
}


void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
int old_cpu = task_cpu(p);
struct cfs_rq *old_cfsrq = task_cfs_rq(p),
*new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
#ifdef CONFIG_SCHED_DEBUG
/*
* We should never call set_task_cpu() on a blocked task,
* ttwu() will sort out the placement.
*/
WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
!(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
#endif

trace_sched_migrate_task(p, new_cpu);

if (old_cpu != new_cpu) {
if (task_cpu(p) != new_cpu) {
p->se.nr_migrations++;
perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
1, 1, NULL, 0);
perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
}
p->se.vruntime -= old_cfsrq->min_vruntime -
new_cfsrq->min_vruntime;

__set_task_cpu(p, new_cpu);
}
@@ -2105,13 +2073,10 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)

/*
* If the task is not on a runqueue (and not running), then
* it is sufficient to simply update the task's cpu field.
* the next wake-up will properly place the task.
*/
if (!p->se.on_rq && !task_running(rq, p)) {
update_rq_clock(rq);
set_task_cpu(p, dest_cpu);
if (!p->se.on_rq && !task_running(rq, p))
return 0;
}

init_completion(&req->done);
req->task = p;
@@ -2317,10 +2282,73 @@ void task_oncpu_function_call(struct task_struct *p,
}

#ifdef CONFIG_SMP
static int select_fallback_rq(int cpu, struct task_struct *p)
{
int dest_cpu;
const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));

/* Look for allowed, online CPU in same node. */
for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
return dest_cpu;

/* Any allowed, online CPU? */
dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
if (dest_cpu < nr_cpu_ids)
return dest_cpu;

/* No more Mr. Nice Guy. */
if (dest_cpu >= nr_cpu_ids) {
rcu_read_lock();
cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
rcu_read_unlock();
dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);

/*
* Don't tell them about moving exiting tasks or
* kernel threads (both mm NULL), since they never
* leave kernel.
*/
if (p->mm && printk_ratelimit()) {
printk(KERN_INFO "process %d (%s) no "
"longer affine to cpu%d\n",
task_pid_nr(p), p->comm, cpu);
}
}

return dest_cpu;
}

/*
* Called from:
*
* - fork, @p is stable because it isn't on the tasklist yet
*
* - exec, @p is unstable, retry loop
*
* - wake-up, we serialize ->cpus_allowed against TASK_WAKING so
* we should be good.
*/
static inline
int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
{
return p->sched_class->select_task_rq(p, sd_flags, wake_flags);
int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);

/*
* In order not to call set_task_cpu() on a blocking task we need
* to rely on ttwu() to place the task on a valid ->cpus_allowed
* cpu.
*
* Since this is common to all placement strategies, this lives here.
*
* [ this allows ->select_task() to simply return task_cpu(p) and
* not worry about this generic constraint ]
*/
if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
!cpu_online(cpu)))
cpu = select_fallback_rq(task_cpu(p), p);

return cpu;
}
#endif

@@ -2375,6 +2403,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
if (task_contributes_to_load(p))
rq->nr_uninterruptible--;
p->state = TASK_WAKING;

if (p->sched_class->task_waking)
p->sched_class->task_waking(rq, p);

__task_rq_unlock(rq);

cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
@@ -2438,8 +2470,8 @@ out_running:

p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
if (p->sched_class->task_wake_up)
p->sched_class->task_wake_up(rq, p);
if (p->sched_class->task_woken)
p->sched_class->task_woken(rq, p);

if (unlikely(rq->idle_stamp)) {
u64 delta = rq->clock - rq->idle_stamp;
@@ -2538,14 +2570,6 @@ static void __sched_fork(struct task_struct *p)
#ifdef CONFIG_PREEMPT_NOTIFIERS
INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif

/*
* We mark the process as running here, but have not actually
* inserted it onto the runqueue yet. This guarantees that
* nobody will actually run it, and a signal or other external
* event cannot wake it up and insert it on the runqueue either.
*/
p->state = TASK_RUNNING;
}

/*
@@ -2556,6 +2580,12 @@ void sched_fork(struct task_struct *p, int clone_flags)
int cpu = get_cpu();

__sched_fork(p);
/*
* We mark the process as waking here. This guarantees that
* nobody will actually run it, and a signal or other external
* event cannot wake it up and insert it on the runqueue either.
*/
p->state = TASK_WAKING;

/*
* Revert to default priority/policy on fork if requested.
@@ -2624,14 +2654,15 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
struct rq *rq;

rq = task_rq_lock(p, &flags);
BUG_ON(p->state != TASK_RUNNING);
BUG_ON(p->state != TASK_WAKING);
p->state = TASK_RUNNING;
update_rq_clock(rq);
activate_task(rq, p, 0);
trace_sched_wakeup_new(rq, p, 1);
check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
if (p->sched_class->task_wake_up)
p->sched_class->task_wake_up(rq, p);
if (p->sched_class->task_woken)
p->sched_class->task_woken(rq, p);
#endif
task_rq_unlock(rq, &flags);
}
@@ -3101,21 +3132,36 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
}

/*
* If dest_cpu is allowed for this process, migrate the task to it.
* This is accomplished by forcing the cpu_allowed mask to only
* allow dest_cpu, which will force the cpu onto dest_cpu. Then
* the cpu_allowed mask is restored.
* sched_exec - execve() is a valuable balancing opportunity, because at
* this point the task has the smallest effective memory and cache footprint.
*/
static void sched_migrate_task(struct task_struct *p, int dest_cpu)
void sched_exec(void)
{
struct task_struct *p = current;
struct migration_req req;
int dest_cpu, this_cpu;
unsigned long flags;
struct rq *rq;

again:
this_cpu = get_cpu();
dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
if (dest_cpu == this_cpu) {
put_cpu();
return;
}

rq = task_rq_lock(p, &flags);
put_cpu();

/*
* select_task_rq() can race against ->cpus_allowed
*/
if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
|| unlikely(!cpu_active(dest_cpu)))
goto out;
|| unlikely(!cpu_active(dest_cpu))) {
task_rq_unlock(rq, &flags);
goto again;
}

/* force the process onto the specified CPU */
if (migrate_task(p, dest_cpu, &req)) {
@@ -3130,23 +3176,9 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)

return;
}
out:
task_rq_unlock(rq, &flags);
}

/*
* sched_exec - execve() is a valuable balancing opportunity, because at
* this point the task has the smallest effective memory and cache footprint.
*/
void sched_exec(void)
{
int new_cpu, this_cpu = get_cpu();
new_cpu = select_task_rq(current, SD_BALANCE_EXEC, 0);
put_cpu();
if (new_cpu != this_cpu)
sched_migrate_task(current, new_cpu);
}

/*
* pull_task - move a task from a remote runqueue to the local runqueue.
* Both runqueues must be locked.
@@ -5911,14 +5943,15 @@ EXPORT_SYMBOL(wait_for_completion_killable);
*/
bool try_wait_for_completion(struct completion *x)
{
unsigned long flags;
int ret = 1;

spin_lock_irq(&x->wait.lock);
spin_lock_irqsave(&x->wait.lock, flags);
if (!x->done)
ret = 0;
else
x->done--;
spin_unlock_irq(&x->wait.lock);
spin_unlock_irqrestore(&x->wait.lock, flags);
return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
@@ -5933,12 +5966,13 @@ EXPORT_SYMBOL(try_wait_for_completion);
*/
bool completion_done(struct completion *x)
{
unsigned long flags;
int ret = 1;

spin_lock_irq(&x->wait.lock);
spin_lock_irqsave(&x->wait.lock, flags);
if (!x->done)
ret = 0;
spin_unlock_irq(&x->wait.lock);
spin_unlock_irqrestore(&x->wait.lock, flags);
return ret;
}
EXPORT_SYMBOL(completion_done);
@@ -6457,7 +6491,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
return -EINVAL;

retval = -ESRCH;
read_lock(&tasklist_lock);
rcu_read_lock();
p = find_process_by_pid(pid);
if (p) {
retval = security_task_getscheduler(p);
@@ -6465,7 +6499,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
retval = p->policy
| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
}
read_unlock(&tasklist_lock);
rcu_read_unlock();
return retval;
}

@@ -6483,7 +6517,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
if (!param || pid < 0)
return -EINVAL;

read_lock(&tasklist_lock);
rcu_read_lock();
p = find_process_by_pid(pid);
retval = -ESRCH;
if (!p)
@@ -6494,7 +6528,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
goto out_unlock;

lp.sched_priority = p->rt_priority;
read_unlock(&tasklist_lock);
rcu_read_unlock();

/*
* This one might sleep, we cannot do it with a spinlock held ...
@@ -6504,7 +6538,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
return retval;

out_unlock:
read_unlock(&tasklist_lock);
rcu_read_unlock();
return retval;
}

@@ -6515,22 +6549,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
int retval;

get_online_cpus();
read_lock(&tasklist_lock);
rcu_read_lock();

p = find_process_by_pid(pid);
if (!p) {
read_unlock(&tasklist_lock);
rcu_read_unlock();
put_online_cpus();
return -ESRCH;
}

/*
* It is not safe to call set_cpus_allowed with the
* tasklist_lock held. We will bump the task_struct's
* usage count and then drop tasklist_lock.
*/
/* Prevent p going away */
get_task_struct(p);
read_unlock(&tasklist_lock);
rcu_read_unlock();

if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
retval = -ENOMEM;
@@ -6616,7 +6646,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
int retval;

get_online_cpus();
read_lock(&tasklist_lock);
rcu_read_lock();

retval = -ESRCH;
p = find_process_by_pid(pid);
@@ -6632,7 +6662,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
task_rq_unlock(rq, &flags);

out_unlock:
read_unlock(&tasklist_lock);
rcu_read_unlock();
put_online_cpus();

return retval;
@@ -6876,7 +6906,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
return -EINVAL;

retval = -ESRCH;
read_lock(&tasklist_lock);
rcu_read_lock();
p = find_process_by_pid(pid);
if (!p)
goto out_unlock;
@@ -6889,13 +6919,13 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
time_slice = p->sched_class->get_rr_interval(rq, p);
task_rq_unlock(rq, &flags);

read_unlock(&tasklist_lock);
rcu_read_unlock();
jiffies_to_timespec(time_slice, &t);
retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
return retval;

out_unlock:
read_unlock(&tasklist_lock);
rcu_read_unlock();
return retval;
}

@@ -6986,6 +7016,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
raw_spin_lock_irqsave(&rq->lock, flags);

__sched_fork(idle);
idle->state = TASK_RUNNING;
idle->se.exec_start = sched_clock();

cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
@@ -7100,7 +7131,23 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
struct rq *rq;
int ret = 0;

/*
* Since we rely on wake-ups to migrate sleeping tasks, don't change
* the ->cpus_allowed mask from under waking tasks, which would be
* possible when we change rq->lock in ttwu(), so synchronize against
* TASK_WAKING to avoid that.
*/
again:
while (p->state == TASK_WAKING)
cpu_relax();

rq = task_rq_lock(p, &flags);

if (p->state == TASK_WAKING) {
task_rq_unlock(rq, &flags);
goto again;
}

if (!cpumask_intersects(new_mask, cpu_active_mask)) {
ret = -EINVAL;
goto out;
@@ -7156,7 +7203,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
{
struct rq *rq_dest, *rq_src;
int ret = 0, on_rq;
int ret = 0;

if (unlikely(!cpu_active(dest_cpu)))
return ret;
@@ -7172,12 +7219,13 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
goto fail;

on_rq = p->se.on_rq;
if (on_rq)
/*
* If we're not on a rq, the next wake-up will ensure we're
* placed properly.
*/
if (p->se.on_rq) {
deactivate_task(rq_src, p, 0);

set_task_cpu(p, dest_cpu);
if (on_rq) {
set_task_cpu(p, dest_cpu);
activate_task(rq_dest, p, 0);
check_preempt_curr(rq_dest, p, 0);
}
@@ -7273,37 +7321,10 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
{
int dest_cpu;
const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));

again:
/* Look for allowed, online CPU in same node. */
for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
goto move;
dest_cpu = select_fallback_rq(dead_cpu, p);

/* Any allowed, online CPU? */
dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
if (dest_cpu < nr_cpu_ids)
goto move;

/* No more Mr. Nice Guy. */
if (dest_cpu >= nr_cpu_ids) {
cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);

/*
* Don't tell them about moving exiting tasks or
* kernel threads (both mm NULL), since they never
* leave kernel.
*/
if (p->mm && printk_ratelimit()) {
printk(KERN_INFO "process %d (%s) no "
"longer affine to cpu%d\n",
task_pid_nr(p), p->comm, dead_cpu);
}
}

move:
/* It can have affinity changed while we were choosing. */
if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
goto again;
@@ -9668,7 +9689,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
int nested = preempt_count() & ~PREEMPT_ACTIVE;
int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();

return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
}
@@ -10083,7 +10104,7 @@ void sched_move_task(struct task_struct *tsk)

#ifdef CONFIG_FAIR_GROUP_SCHED
if (tsk->sched_class->moved_group)
tsk->sched_class->moved_group(tsk);
tsk->sched_class->moved_group(tsk, on_rq);
#endif

if (unlikely(running))