tracing: infrastructure for supporting binary record

Impact: save on memory for tracing

Current tracers typically use a struct (like struct ftrace_entry,
struct ctx_switch_entry, struct special_entry, etc.) to record a binary
event. Each of these structs can only record its own kind of event, so
a new kind of tracer needs a new struct and a lot of code to handle it.
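
For instance, each existing tracer hard-codes its payload in a dedicated
struct. A rough sketch of the function tracer's entry of that era
(reconstructed for illustration, not part of this diff):

        struct ftrace_entry {
                struct trace_entry      ent;            /* common header: type, flags, pid, ... */
                unsigned long           ip;             /* traced function */
                unsigned long           parent_ip;      /* its caller */
        };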

So we need a generic binary record for events, and this infrastructure
provides it.
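
The generic record pairs the call site and a format pointer with the
binary-encoded varargs. A minimal sketch of the entry layout, matching
the fields used in the trace.c hunk below (the leading trace_entry
header is assumed from the other entry types; the trace.h hunk is not
shown in this excerpt):

        struct bprintk_entry {
                struct trace_entry      ent;    /* common tracing header (assumed) */
                unsigned long           ip;     /* call site */
                const char              *fmt;   /* static format string, stored by pointer */
                u32                     buf[];  /* args encoded by vbin_printf() */
        };

Any tracer can then record an event as a format string plus arguments
instead of defining its own struct and handlers.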

[fweisbec@gmail.com: rebase against latest -tip, make it safe during sched
tracing, as reported by Steven Rostedt]

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <1236356510-8381-3-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Author:    Lai Jiangshan
Date:      2009-03-06 17:21:47 +01:00
Committer: Ingo Molnar
Parent:    546e5354a6
Commit:    1427cdf059
7 changed files with 240 additions and 0 deletions

@@ -3792,6 +3792,62 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
}
EXPORT_SYMBOL_GPL(__ftrace_vprintk);

/**
 * trace_vbprintk - write binary msg to tracing buffer
 *
 * Caller must ensure @fmt stays valid while the msg is in the tracing buffer.
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
        static DEFINE_SPINLOCK(trace_buf_lock);
        static u32 trace_buf[TRACE_BUF_SIZE];

        struct ring_buffer_event *event;
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
        struct bprintk_entry *entry;
        unsigned long flags;
        int resched;
        int cpu, len = 0, size, pc;

        if (tracing_disabled || !trace_bprintk_enable)
                return 0;

        pc = preempt_count();
        resched = ftrace_preempt_disable();
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];

        if (unlikely(atomic_read(&data->disabled)))
                goto out;

        /* Encode the varargs into the static binary buffer under the lock. */
        spin_lock_irqsave(&trace_buf_lock, flags);
        len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);

        if (len > TRACE_BUF_SIZE || len < 0)
                goto out_unlock;

        /* Reserve a ring buffer event large enough for the encoded args. */
        size = sizeof(*entry) + sizeof(u32) * len;
        event = trace_buffer_lock_reserve(tr, TRACE_BPRINTK, size, flags, pc);
        if (!event)
                goto out_unlock;
        entry = ring_buffer_event_data(event);
        entry->ip = ip;
        entry->fmt = fmt;

        memcpy(entry->buf, trace_buf, sizeof(u32) * len);
        ring_buffer_unlock_commit(tr->buffer, event);

out_unlock:
        spin_unlock_irqrestore(&trace_buf_lock, flags);

out:
        ftrace_preempt_enable(resched);
        return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
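
/*
 * Editor's sketch, not part of this patch: trace_vbprintk() records only
 * the pointer to @fmt, so a caller must pass a string with static
 * lifetime (typically a literal).  A varargs wrapper would look like
 * this; the name my_trace_bprintk is hypothetical.
 */
static int my_trace_bprintk(unsigned long ip, const char *fmt, ...)
{
        va_list ap;
        int len;

        va_start(ap, fmt);
        len = trace_vbprintk(ip, fmt, ap);
        va_end(ap);
        return len;
}
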
static int trace_panic_handler(struct notifier_block *this,
                               unsigned long event, void *unused)
{