perf: Cure task_oncpu_function_call() races
Oleg reported that on architectures with __ARCH_WANT_INTERRUPTS_ON_CTXSW the
IPI from task_oncpu_function_call() can land before perf_event_task_sched_in()
and cause interesting situations for e.g. perf_install_in_context().

This patch reworks the task_oncpu_function_call() interface into a more
usable primitive, and reworks all its users to hopefully be more obvious
as well as to remove the races.

While looking at the code I also found a number of races against
perf_event_task_sched_out(), which can flip contexts between tasks, so plug
those too.

Reported-and-reviewed-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit fe4b04fa31
parent b84defe603
committed by Ingo Molnar
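The race is a classic check-then-act across CPUs: task_curr(p) is sampled on
the calling CPU, but with __ARCH_WANT_INTERRUPTS_ON_CTXSW the IPI can land
while the target CPU is still mid context switch, so the check no longer
holds by the time the function runs. The reworked primitive re-validates on
the target CPU itself and returns an error the caller can act on. A minimal
sketch of that shape (modeled on the task_function_call()/remote_function()
pair this rework introduces; comments are added here, and it assumes the
usual <linux/sched.h>, <linux/smp.h> and <linux/errno.h> context):

struct remote_function_call {
	struct task_struct	*p;
	int			(*func)(void *info);
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		/*
		 * Re-check on the target CPU: only here is "p is
		 * current on this CPU" stable for the duration of
		 * the call.
		 */
		tfc->ret = -EAGAIN;
		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

static int
task_function_call(struct task_struct *p, int (*func)(void *info), void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -ESRCH,	/* stays set if @p was not running */
	};

	if (task_curr(p))
		smp_call_function_single(task_cpu(p), remote_function, &data, 1);

	return data.ret;
}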
@@ -2265,27 +2265,6 @@ void kick_process(struct task_struct *p)
 EXPORT_SYMBOL_GPL(kick_process);
 #endif /* CONFIG_SMP */
 
-/**
- * task_oncpu_function_call - call a function on the cpu on which a task runs
- * @p: the task to evaluate
- * @func: the function to be called
- * @info: the function call argument
- *
- * Calls the function @func when the task is currently running. This might
- * be on the current CPU, which just calls the function directly
- */
-void task_oncpu_function_call(struct task_struct *p,
-			      void (*func) (void *info), void *info)
-{
-	int cpu;
-
-	preempt_disable();
-	cpu = task_cpu(p);
-	if (task_curr(p))
-		smp_call_function_single(cpu, func, info, 1);
-	preempt_enable();
-}
-
 #ifdef CONFIG_SMP
 /*
  * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
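The helper deleted above is exactly the racy pattern: task_curr(p) is tested
on the calling CPU and assumed to still hold when the IPI fires remotely.
With the reworked primitive a caller can instead loop on -EAGAIN. A
hypothetical caller sketch, not the kernel's actual code; install_event()
and __install_func() are illustrative names:

/* Runs on the task's CPU once the remote check has passed. */
static int __install_func(void *info);

static void install_event(struct task_struct *task, void *info)
{
	int ret;

	do {
		ret = task_function_call(task, __install_func, info);
		/* -EAGAIN: task migrated or was mid-switch; try again. */
	} while (ret == -EAGAIN);

	/* -ESRCH: task was not running at all; handle it off-CPU. */
}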
@@ -2776,9 +2755,12 @@ static inline void
 prepare_task_switch(struct rq *rq, struct task_struct *prev,
 		    struct task_struct *next)
 {
+	sched_info_switch(prev, next);
+	perf_event_task_sched_out(prev, next);
 	fire_sched_out_preempt_notifiers(prev, next);
 	prepare_lock_switch(rq, next);
 	prepare_arch_switch(next);
+	trace_sched_switch(prev, next);
 }
 
 /**
@@ -2911,7 +2893,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	struct mm_struct *mm, *oldmm;
 
 	prepare_task_switch(rq, prev, next);
-	trace_sched_switch(prev, next);
+
 	mm = next->mm;
 	oldmm = prev->active_mm;
 	/*
@@ -3989,9 +3971,6 @@ need_resched_nonpreemptible:
 	rq->skip_clock_update = 0;
 
 	if (likely(prev != next)) {
-		sched_info_switch(prev, next);
-		perf_event_task_sched_out(prev, next);
-
 		rq->nr_switches++;
 		rq->curr = next;
 		++*switch_count;
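Net effect of the last three hunks: the switch-out notifications move out of
schedule() and into prepare_task_switch(). Since schedule() sets
rq->curr = next before calling context_switch(), perf_event_task_sched_out()
now runs after the old task has stopped being rq->curr, presumably so a
concurrent remote task_curr(prev) re-check fails cleanly with a retry rather
than racing the context flip. A sketch of the resulting function, assembled
from the second hunk above with explanatory comments added here:

static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
		    struct task_struct *next)
{
	sched_info_switch(prev, next);
	/*
	 * Runs with the rq locked and rq->curr already pointing at
	 * @next, so a remote re-check of task_curr(prev) bounces out
	 * instead of observing a half-switched perf context.
	 */
	perf_event_task_sched_out(prev, next);
	fire_sched_out_preempt_notifiers(prev, next);
	prepare_lock_switch(rq, next);
	prepare_arch_switch(next);
	trace_sched_switch(prev, next);	/* moved here from context_switch() */
}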