tracing: Use the perf recursion protection from trace event
When we commit a trace to perf, we first check whether we are recursing in the same buffer, so that we don't mess up the buffer with a recursing trace. But later on we do the same check from perf to avoid commit recursion. The recursion check is desired early, before we touch the buffer, but we want to do it only once. So export the recursion protection from perf and use it from the trace events before submitting a trace.

v2: Put appropriate Reported-by tag

Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Jason Baron <jbaron@redhat.com>
LKML-Reference: <1258864015-10579-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit ce71b9df88
parent e25613683b
committed by Ingo Molnar
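For context, the pattern the trace event handlers adopt in the diff below looks roughly like the following. This is a minimal sketch, not a drop-in implementation: example_trace_submit() and its record/size parameters are hypothetical, and only perf_swevent_get_recursion_context()/perf_swevent_put_recursion_context() are the helpers exported by this commit.

#include <linux/irqflags.h>
#include <linux/perf_event.h>

/*
 * Sketch of the submission path after this change: one early
 * recursion check via the helpers exported from perf, taken
 * before the per-cpu buffer is touched.
 */
static int example_trace_submit(void *record, int size)
{
	unsigned long irq_flags;
	int *recursion;

	/* Disabling irqs also protects the rcu read side. */
	local_irq_save(irq_flags);

	/* Single early check, before we touch the buffer. */
	if (perf_swevent_get_recursion_context(&recursion))
		goto end_recursion;

	/* ... fill the per-cpu buffer and call perf_tp_event() ... */

	perf_swevent_put_recursion_context(recursion);
end_recursion:
	local_irq_restore(irq_flags);
	return 0;
}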
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1208,11 +1208,12 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kprobe_trace_entry *entry;
-	struct perf_trace_buf *trace_buf;
 	struct trace_entry *ent;
 	int size, __size, i, pc, __cpu;
 	unsigned long irq_flags;
+	char *trace_buf;
 	char *raw_data;
+	int *recursion;
 
 	pc = preempt_count();
 	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
@@ -1227,6 +1228,10 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
 	 * This also protects the rcu read side
 	 */
 	local_irq_save(irq_flags);
+
+	if (perf_swevent_get_recursion_context(&recursion))
+		goto end_recursion;
+
 	__cpu = smp_processor_id();
 
 	if (in_nmi())
@@ -1237,18 +1242,7 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
 	if (!trace_buf)
 		goto end;
 
-	trace_buf = per_cpu_ptr(trace_buf, __cpu);
-
-	if (trace_buf->recursion++)
-		goto end_recursion;
-
-	/*
-	 * Make recursion update visible before entering perf_tp_event
-	 * so that we protect from perf recursions.
-	 */
-	barrier();
-
-	raw_data = trace_buf->buf;
+	raw_data = per_cpu_ptr(trace_buf, __cpu);
 
 	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
 	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -1263,9 +1257,9 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 	perf_tp_event(call->id, entry->ip, 1, entry, size);
 
-end_recursion:
-	trace_buf->recursion--;
 end:
+	perf_swevent_put_recursion_context(recursion);
+end_recursion:
 	local_irq_restore(irq_flags);
 
 	return 0;
@@ -1278,10 +1272,11 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kretprobe_trace_entry *entry;
-	struct perf_trace_buf *trace_buf;
 	struct trace_entry *ent;
 	int size, __size, i, pc, __cpu;
 	unsigned long irq_flags;
+	char *trace_buf;
+	int *recursion;
 	char *raw_data;
 
 	pc = preempt_count();
@@ -1297,6 +1292,10 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
 	 * This also protects the rcu read side
 	 */
 	local_irq_save(irq_flags);
+
+	if (perf_swevent_get_recursion_context(&recursion))
+		goto end_recursion;
+
 	__cpu = smp_processor_id();
 
 	if (in_nmi())
@@ -1307,18 +1306,7 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
 	if (!trace_buf)
 		goto end;
 
-	trace_buf = per_cpu_ptr(trace_buf, __cpu);
-
-	if (trace_buf->recursion++)
-		goto end_recursion;
-
-	/*
-	 * Make recursion update visible before entering perf_tp_event
-	 * so that we protect from perf recursions.
-	 */
-	barrier();
-
-	raw_data = trace_buf->buf;
+	raw_data = per_cpu_ptr(trace_buf, __cpu);
 
 	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
 	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -1334,9 +1322,9 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 	perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
 
-end_recursion:
-	trace_buf->recursion--;
 end:
+	perf_swevent_put_recursion_context(recursion);
+end_recursion:
 	local_irq_restore(irq_flags);
 
 	return 0;
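For readers wondering what the exported guard actually protects against, here is a hedged sketch of the idea behind it, assuming the per-context scheme perf uses: one recursion flag per execution context (task, softirq, hardirq, NMI), so an event firing from an interrupt does not trip the flag set by the task it preempted. All example_* names are illustrative, not the kernel's actual code, and the real flags live in per-cpu data.

#include <linux/hardirq.h>

static int example_recursion[4];	/* per-cpu in the real kernel */

static int example_context_level(void)
{
	if (in_nmi())
		return 3;
	if (in_irq())
		return 2;
	if (in_softirq())
		return 1;
	return 0;			/* plain task context */
}

static int example_get_recursion_context(int **recursion)
{
	int rctx = example_context_level();

	if (example_recursion[rctx])
		return -1;		/* already tracing at this level */

	example_recursion[rctx]++;
	barrier();			/* flag visible before the event runs */

	*recursion = &example_recursion[rctx];
	return 0;
}

static void example_put_recursion_context(int *recursion)
{
	barrier();
	(*recursion)--;
}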