perf_counter: Fix cpuctx->task_ctx races
Peter noticed that we are sometimes reading cpuctx->task_ctx with interrupts enabled.

Noticed-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
@@ -234,15 +234,18 @@ static void __perf_counter_remove_from_context(void *info)
 	struct perf_counter_context *ctx = counter->ctx;
 	unsigned long flags;
 
+	local_irq_save(flags);
 	/*
 	 * If this is a task context, we need to check whether it is
 	 * the current task context of this cpu. If not it has been
 	 * scheduled out before the smp call arrived.
 	 */
-	if (ctx->task && cpuctx->task_ctx != ctx)
+	if (ctx->task && cpuctx->task_ctx != ctx) {
+		local_irq_restore(flags);
 		return;
+	}
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	spin_lock(&ctx->lock);
 	/*
 	 * Protect the list operation against NMI by disabling the
 	 * counters on a global level.
@@ -382,14 +385,17 @@ static void __perf_counter_disable(void *info)
 	struct perf_counter_context *ctx = counter->ctx;
 	unsigned long flags;
 
+	local_irq_save(flags);
 	/*
 	 * If this is a per-task counter, need to check whether this
 	 * counter's task is the current task on this cpu.
 	 */
-	if (ctx->task && cpuctx->task_ctx != ctx)
+	if (ctx->task && cpuctx->task_ctx != ctx) {
+		local_irq_restore(flags);
 		return;
+	}
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	spin_lock(&ctx->lock);
 
 	/*
 	 * If the counter is on, turn it off.
@@ -615,6 +621,7 @@ static void __perf_install_in_context(void *info)
 	unsigned long flags;
 	int err;
 
+	local_irq_save(flags);
 	/*
 	 * If this is a task context, we need to check whether it is
 	 * the current task context of this cpu. If not it has been
@@ -623,12 +630,14 @@ static void __perf_install_in_context(void *info)
 	 * on this cpu because it had no counters.
 	 */
 	if (ctx->task && cpuctx->task_ctx != ctx) {
-		if (cpuctx->task_ctx || ctx->task != current)
+		if (cpuctx->task_ctx || ctx->task != current) {
+			local_irq_restore(flags);
 			return;
+		}
 		cpuctx->task_ctx = ctx;
 	}
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	update_context_time(ctx);
 
@@ -745,17 +754,20 @@ static void __perf_counter_enable(void *info)
 	unsigned long flags;
 	int err;
 
+	local_irq_save(flags);
 	/*
 	 * If this is a per-task counter, need to check whether this
 	 * counter's task is the current task on this cpu.
 	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
-		if (cpuctx->task_ctx || ctx->task != current)
+		if (cpuctx->task_ctx || ctx->task != current) {
+			local_irq_restore(flags);
 			return;
+		}
 		cpuctx->task_ctx = ctx;
 	}
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	update_context_time(ctx);
 
|
Reference in New Issue
Block a user