perf_events: Improve task_sched_in()
This patch is an optimization in perf_event_task_sched_in() to avoid scheduling the events twice in a row. Without it, the perf_disable()/perf_enable() pair is invoked twice, so pinned events are counted while scheduling flexible events and we go through hw_perf_enable() twice. By encapsulating the whole sequence into perf_disable()/perf_enable() we ensure that hw_perf_enable() is going to be invoked only once, because of the refcount protection. Signed-off-by: Stephane Eranian <eranian@google.com> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> LKML-Reference: <1268288765-5326-1-git-send-email-eranian@google.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
committed by
Ingo Molnar
parent
caa0142d84
commit
9b33fa6ba0
@@ -1368,6 +1368,8 @@ void perf_event_task_sched_in(struct task_struct *task)
|
|||||||
if (cpuctx->task_ctx == ctx)
|
if (cpuctx->task_ctx == ctx)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
perf_disable();
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We want to keep the following priority order:
|
* We want to keep the following priority order:
|
||||||
* cpu pinned (that don't need to move), task pinned,
|
* cpu pinned (that don't need to move), task pinned,
|
||||||
@@ -1380,6 +1382,8 @@ void perf_event_task_sched_in(struct task_struct *task)
|
|||||||
ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
|
ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
|
||||||
|
|
||||||
cpuctx->task_ctx = ctx;
|
cpuctx->task_ctx = ctx;
|
||||||
|
|
||||||
|
perf_enable();
|
||||||
}
|
}
|
||||||
|
|
||||||
#define MAX_INTERRUPTS (~0ULL)
|
#define MAX_INTERRUPTS (~0ULL)
|
||||||
|
Reference in New Issue
Block a user