Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  kernel/profile.c: Switch /proc/irq/prof_cpu_mask to seq_file
  tracing: Export trace_profile_buf symbols
  tracing/events: use list_for_each_entry_continue
  tracing: remove max_tracer_type_len
  function-graph: use ftrace_graph_funcs directly
  tracing: Remove markers
  tracing: Allocate the ftrace event profile buffer dynamically
  tracing: Factorize the events profile accounting
This commit is contained in:
Linus Torvalds
2009-09-21 09:05:47 -07:00
26 changed files with 298 additions and 1659 deletions

View File

@@ -382,20 +382,14 @@ static inline int ftrace_get_offsets_##call( \
*
* NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
*
* static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
* static int ftrace_profile_enable_<call>(void)
* {
* int ret = 0;
*
* if (!atomic_inc_return(&event_call->profile_count))
* ret = register_trace_<call>(ftrace_profile_<call>);
*
* return ret;
* return register_trace_<call>(ftrace_profile_<call>);
* }
*
* static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
* static void ftrace_profile_disable_<call>(void)
* {
* if (atomic_add_negative(-1, &event->call->profile_count))
* unregister_trace_<call>(ftrace_profile_<call>);
* unregister_trace_<call>(ftrace_profile_<call>);
* }
*
*/
@@ -405,20 +399,14 @@ static inline int ftrace_get_offsets_##call( \
\
static void ftrace_profile_##call(proto); \
\
static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
static int ftrace_profile_enable_##call(void) \
{ \
int ret = 0; \
\
if (!atomic_inc_return(&event_call->profile_count)) \
ret = register_trace_##call(ftrace_profile_##call); \
\
return ret; \
return register_trace_##call(ftrace_profile_##call); \
} \
\
static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
static void ftrace_profile_disable_##call(void) \
{ \
if (atomic_add_negative(-1, &event_call->profile_count)) \
unregister_trace_##call(ftrace_profile_##call); \
unregister_trace_##call(ftrace_profile_##call); \
}
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -660,11 +648,12 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
* struct ftrace_raw_##call *entry;
* u64 __addr = 0, __count = 1;
* unsigned long irq_flags;
* struct trace_entry *ent;
* int __entry_size;
* int __data_size;
* int __cpu
* int pc;
*
* local_save_flags(irq_flags);
* pc = preempt_count();
*
* __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
@@ -675,25 +664,34 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
* sizeof(u64));
* __entry_size -= sizeof(u32);
*
* do {
* char raw_data[__entry_size]; <- allocate our sample in the stack
* struct trace_entry *ent;
* // Protect the non nmi buffer
* // This also protects the rcu read side
* local_irq_save(irq_flags);
* __cpu = smp_processor_id();
*
* zero dead bytes from alignment to avoid stack leak to userspace:
* if (in_nmi())
* raw_data = rcu_dereference(trace_profile_buf_nmi);
* else
* raw_data = rcu_dereference(trace_profile_buf);
*
* *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
* entry = (struct ftrace_raw_<call> *)raw_data;
* ent = &entry->ent;
* tracing_generic_entry_update(ent, irq_flags, pc);
* ent->type = event_call->id;
* if (!raw_data)
* goto end;
*
* <tstruct> <- do some jobs with dynamic arrays
* raw_data = per_cpu_ptr(raw_data, __cpu);
*
* <assign> <- affect our values
* //zero dead bytes from alignment to avoid stack leak to userspace:
* *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
* entry = (struct ftrace_raw_<call> *)raw_data;
* ent = &entry->ent;
* tracing_generic_entry_update(ent, irq_flags, pc);
* ent->type = event_call->id;
*
* perf_tpcounter_event(event_call->id, __addr, __count, entry,
* __entry_size); <- submit them to perf counter
* } while (0);
* <tstruct> <- do some jobs with dynamic arrays
*
* <assign> <- affect our values
*
* perf_tpcounter_event(event_call->id, __addr, __count, entry,
* __entry_size); <- submit them to perf counter
*
* }
*/
@@ -716,11 +714,13 @@ static void ftrace_profile_##call(proto) \
struct ftrace_raw_##call *entry; \
u64 __addr = 0, __count = 1; \
unsigned long irq_flags; \
struct trace_entry *ent; \
int __entry_size; \
int __data_size; \
char *raw_data; \
int __cpu; \
int pc; \
\
local_save_flags(irq_flags); \
pc = preempt_count(); \
\
__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
@@ -728,23 +728,38 @@ static void ftrace_profile_##call(proto) \
sizeof(u64)); \
__entry_size -= sizeof(u32); \
\
do { \
char raw_data[__entry_size]; \
struct trace_entry *ent; \
if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \
"profile buffer not large enough")) \
return; \
\
*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
entry = (struct ftrace_raw_##call *)raw_data; \
ent = &entry->ent; \
tracing_generic_entry_update(ent, irq_flags, pc); \
ent->type = event_call->id; \
local_irq_save(irq_flags); \
__cpu = smp_processor_id(); \
\
tstruct \
if (in_nmi()) \
raw_data = rcu_dereference(trace_profile_buf_nmi); \
else \
raw_data = rcu_dereference(trace_profile_buf); \
\
{ assign; } \
if (!raw_data) \
goto end; \
\
perf_tpcounter_event(event_call->id, __addr, __count, entry,\
raw_data = per_cpu_ptr(raw_data, __cpu); \
\
*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
entry = (struct ftrace_raw_##call *)raw_data; \
ent = &entry->ent; \
tracing_generic_entry_update(ent, irq_flags, pc); \
ent->type = event_call->id; \
\
tstruct \
\
{ assign; } \
\
perf_tpcounter_event(event_call->id, __addr, __count, entry, \
__entry_size); \
} while (0); \
\
end: \
local_irq_restore(irq_flags); \
\
}