Merge branch 'perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing into perf/core
@@ -28,7 +28,7 @@ extern void die(const char *, struct pt_regs *,long);
 extern int __must_check __die(const char *, struct pt_regs *, long);
 extern void show_registers(struct pt_regs *regs);
 extern void show_trace(struct task_struct *t, struct pt_regs *regs,
-                       unsigned long *sp, unsigned long bp);
+                       unsigned long *sp);
 extern void __show_regs(struct pt_regs *regs, int all);
 extern void show_regs(struct pt_regs *regs);
 extern unsigned long oops_begin(void);
@@ -7,6 +7,7 @@
 #define _ASM_X86_STACKTRACE_H

 #include <linux/uaccess.h>
+#include <linux/ptrace.h>

 extern int kstack_depth_to_print;

@@ -46,7 +47,7 @@ struct stacktrace_ops {
 };

 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
-                unsigned long *stack, unsigned long bp,
+                unsigned long *stack,
                 const struct stacktrace_ops *ops, void *data);

 #ifdef CONFIG_X86_32
@@ -57,13 +58,39 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
 #define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
 #endif

+#ifdef CONFIG_FRAME_POINTER
+static inline unsigned long
+stack_frame(struct task_struct *task, struct pt_regs *regs)
+{
+        unsigned long bp;
+
+        if (regs)
+                return regs->bp;
+
+        if (task == current) {
+                /* Grab bp right from our regs */
+                get_bp(bp);
+                return bp;
+        }
+
+        /* bp is the last reg pushed by switch_to */
+        return *(unsigned long *)task->thread.sp;
+}
+#else
+static inline unsigned long
+stack_frame(struct task_struct *task, struct pt_regs *regs)
+{
+        return 0;
+}
+#endif
+
 extern void
 show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
-                   unsigned long *stack, unsigned long bp, char *log_lvl);
+                   unsigned long *stack, char *log_lvl);

 extern void
 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
-                   unsigned long *sp, unsigned long bp, char *log_lvl);
+                   unsigned long *sp, char *log_lvl);

 extern unsigned int code_bytes;
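With this change the frame pointer is no longer threaded through the call chain: dump_trace(), show_trace() and the *_log_lvl() helpers lose their bp parameter, and dump_trace() derives it internally from the new stack_frame() helper (regs->bp when regs are supplied, the live rbp via get_bp() for current, or the bp saved by switch_to for a sleeping task). A minimal caller-side sketch of the new convention; example_backtrace() and its ops/data arguments are hypothetical and not part of this commit:

#include <asm/stacktrace.h>

/* Sketch: walk the kernel stack described by regs with caller-supplied ops. */
static void example_backtrace(struct pt_regs *regs,
                              const struct stacktrace_ops *ops, void *data)
{
        /*
         * Old interface: dump_trace(NULL, regs, NULL, regs->bp, ops, data);
         * New interface: the bp argument is gone; dump_trace() now calls
         * stack_frame(task, regs) itself.
         */
        dump_trace(NULL, regs, NULL, ops, data);
}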
@@ -1657,7 +1657,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)

         perf_callchain_store(entry, regs->ip);

-        dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
+        dump_trace(NULL, regs, NULL, &backtrace_ops, entry);
 }

 #ifdef CONFIG_COMPAT
@@ -175,21 +175,21 @@ static const struct stacktrace_ops print_trace_ops = {

 void
 show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
-                unsigned long *stack, unsigned long bp, char *log_lvl)
+                unsigned long *stack, char *log_lvl)
 {
         printk("%sCall Trace:\n", log_lvl);
-        dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
+        dump_trace(task, regs, stack, &print_trace_ops, log_lvl);
 }

 void show_trace(struct task_struct *task, struct pt_regs *regs,
-                unsigned long *stack, unsigned long bp)
+                unsigned long *stack)
 {
-        show_trace_log_lvl(task, regs, stack, bp, "");
+        show_trace_log_lvl(task, regs, stack, "");
 }

 void show_stack(struct task_struct *task, unsigned long *sp)
 {
-        show_stack_log_lvl(task, NULL, sp, 0, "");
+        show_stack_log_lvl(task, NULL, sp, "");
 }

 /*
@@ -210,7 +210,7 @@ void dump_stack(void)
                 init_utsname()->release,
                 (int)strcspn(init_utsname()->version, " "),
                 init_utsname()->version);
-        show_trace(NULL, NULL, &stack, bp);
+        show_trace(NULL, NULL, &stack);
 }
 EXPORT_SYMBOL(dump_stack);
@@ -17,11 +17,12 @@
 #include <asm/stacktrace.h>


-void dump_trace(struct task_struct *task, struct pt_regs *regs,
-                unsigned long *stack, unsigned long bp,
+void dump_trace(struct task_struct *task,
+                struct pt_regs *regs, unsigned long *stack,
                 const struct stacktrace_ops *ops, void *data)
 {
         int graph = 0;
+        unsigned long bp;

         if (!task)
                 task = current;
@@ -34,18 +35,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                 stack = (unsigned long *)task->thread.sp;
         }

-#ifdef CONFIG_FRAME_POINTER
-        if (!bp) {
-                if (task == current) {
-                        /* Grab bp right from our regs */
-                        get_bp(bp);
-                } else {
-                        /* bp is the last reg pushed by switch_to */
-                        bp = *(unsigned long *) task->thread.sp;
-                }
-        }
-#endif
-
+        bp = stack_frame(task, regs);
         for (;;) {
                 struct thread_info *context;

@@ -65,7 +55,7 @@ EXPORT_SYMBOL(dump_trace);

 void
 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
-                unsigned long *sp, unsigned long bp, char *log_lvl)
+                unsigned long *sp, char *log_lvl)
 {
         unsigned long *stack;
         int i;
@@ -87,7 +77,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                 touch_nmi_watchdog();
         }
         printk(KERN_CONT "\n");
-        show_trace_log_lvl(task, regs, sp, bp, log_lvl);
+        show_trace_log_lvl(task, regs, sp, log_lvl);
 }

@@ -112,8 +102,7 @@ void show_registers(struct pt_regs *regs)
                 u8 *ip;

                 printk(KERN_EMERG "Stack:\n");
-                show_stack_log_lvl(NULL, regs, &regs->sp,
-                                0, KERN_EMERG);
+                show_stack_log_lvl(NULL, regs, &regs->sp, KERN_EMERG);

                 printk(KERN_EMERG "Code: ");
@@ -139,8 +139,8 @@ fixup_bp_irq_link(unsigned long bp, unsigned long *stack,
  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
  */

-void dump_trace(struct task_struct *task, struct pt_regs *regs,
-                unsigned long *stack, unsigned long bp,
+void dump_trace(struct task_struct *task,
+                struct pt_regs *regs, unsigned long *stack,
                 const struct stacktrace_ops *ops, void *data)
 {
         const unsigned cpu = get_cpu();
@@ -149,6 +149,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
         unsigned used = 0;
         struct thread_info *tinfo;
         int graph = 0;
+        unsigned long bp;

         if (!task)
                 task = current;
@@ -160,18 +161,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                 stack = (unsigned long *)task->thread.sp;
         }

-#ifdef CONFIG_FRAME_POINTER
-        if (!bp) {
-                if (task == current) {
-                        /* Grab bp right from our regs */
-                        get_bp(bp);
-                } else {
-                        /* bp is the last reg pushed by switch_to */
-                        bp = *(unsigned long *) task->thread.sp;
-                }
-        }
-#endif
-
+        bp = stack_frame(task, regs);
         /*
          * Print function call entries in all stacks, starting at the
          * current stack address. If the stacks consist of nested
@@ -235,7 +225,7 @@ EXPORT_SYMBOL(dump_trace);

 void
 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
-                unsigned long *sp, unsigned long bp, char *log_lvl)
+                unsigned long *sp, char *log_lvl)
 {
         unsigned long *irq_stack_end;
         unsigned long *irq_stack;
@@ -279,7 +269,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                 preempt_enable();

         printk(KERN_CONT "\n");
-        show_trace_log_lvl(task, regs, sp, bp, log_lvl);
+        show_trace_log_lvl(task, regs, sp, log_lvl);
 }

 void show_registers(struct pt_regs *regs)
@@ -308,7 +298,7 @@ void show_registers(struct pt_regs *regs)

                 printk(KERN_EMERG "Stack:\n");
                 show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
-                                regs->bp, KERN_EMERG);
+                                KERN_EMERG);

                 printk(KERN_EMERG "Code: ");
@@ -91,8 +91,7 @@ void exit_thread(void)
 void show_regs(struct pt_regs *regs)
 {
         show_registers(regs);
-        show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs),
-                   regs->bp);
+        show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs));
 }

 void show_regs_common(void)
@@ -73,22 +73,22 @@ static const struct stacktrace_ops save_stack_ops_nosched = {
  */
 void save_stack_trace(struct stack_trace *trace)
 {
-        dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);
+        dump_trace(current, NULL, NULL, &save_stack_ops, trace);
         if (trace->nr_entries < trace->max_entries)
                 trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);

-void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp)
+void save_stack_trace_regs(struct stack_trace *trace, struct pt_regs *regs)
 {
-        dump_trace(current, NULL, NULL, bp, &save_stack_ops, trace);
+        dump_trace(current, regs, NULL, &save_stack_ops, trace);
         if (trace->nr_entries < trace->max_entries)
                 trace->entries[trace->nr_entries++] = ULONG_MAX;
 }

 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
-        dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
+        dump_trace(tsk, NULL, NULL, &save_stack_ops_nosched, trace);
         if (trace->nr_entries < trace->max_entries)
                 trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
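save_stack_trace_bp() is gone; its replacement save_stack_trace_regs() takes the pt_regs describing the context to unwind rather than a raw frame-pointer value (the kmemcheck hunk below is converted the same way). A sketch of a caller, where capture_from_regs() and EXAMPLE_NR_ENTRIES are hypothetical; only struct stack_trace, save_stack_trace_regs() and print_stack_trace() come from the kernel API:

#include <linux/stacktrace.h>

#define EXAMPLE_NR_ENTRIES 16

/* Sketch: snapshot the kernel stack described by an exception's registers. */
static void capture_from_regs(struct pt_regs *regs)
{
        static unsigned long example_entries[EXAMPLE_NR_ENTRIES];
        struct stack_trace trace = {
                .entries        = example_entries,
                .max_entries    = EXAMPLE_NR_ENTRIES,
                .skip           = 0,
        };

        /* Was: save_stack_trace_bp(&trace, regs->bp); */
        save_stack_trace_regs(&trace, regs);
        print_stack_trace(&trace, 0);
}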
@@ -185,7 +185,7 @@ void kmemcheck_error_save(enum kmemcheck_shadow state,
         e->trace.entries = e->trace_entries;
         e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
         e->trace.skip = 0;
-        save_stack_trace_bp(&e->trace, regs->bp);
+        save_stack_trace_regs(&e->trace, regs);

         /* Round address down to nearest 16 bytes */
         shadow_copy = kmemcheck_shadow_lookup(address & ~0xf);
@@ -126,7 +126,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
         if (!user_mode_vm(regs)) {
                 unsigned long stack = kernel_stack_pointer(regs);
                 if (depth)
-                        dump_trace(NULL, regs, (unsigned long *)stack, 0,
+                        dump_trace(NULL, regs, (unsigned long *)stack,
                                    &backtrace_ops, &depth);
                 return;
         }
@@ -154,12 +154,14 @@ enum {
         TRACE_EVENT_FL_ENABLED_BIT,
         TRACE_EVENT_FL_FILTERED_BIT,
         TRACE_EVENT_FL_RECORDED_CMD_BIT,
+        TRACE_EVENT_FL_CAP_ANY_BIT,
 };

 enum {
         TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT),
         TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
         TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
+        TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
 };

 struct ftrace_event_call {
@@ -196,6 +198,14 @@ struct ftrace_event_call {
 #endif
 };

+#define __TRACE_EVENT_FLAGS(name, value) \
+        static int __init trace_init_flags_##name(void) \
+        { \
+                event_##name.flags = value; \
+                return 0; \
+        } \
+        early_initcall(trace_init_flags_##name);
+
 #define PERF_MAX_TRACE_SIZE 2048

 #define MAX_FILTER_PRED 32
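__TRACE_EVENT_FLAGS(name, value) generates an early initcall that stores the given flag value in the event's ftrace_event_call at boot. Roughly, for a hypothetical event foo (event_foo being the ftrace_event_call object the event macros already emit elsewhere), __TRACE_EVENT_FLAGS(foo, TRACE_EVENT_FL_CAP_ANY) expands to:

static int __init trace_init_flags_foo(void)
{
        event_foo.flags = TRACE_EVENT_FL_CAP_ANY;
        return 0;
}
early_initcall(trace_init_flags_foo);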
@@ -2,6 +2,7 @@
 #define __LINUX_STACKTRACE_H

 struct task_struct;
+struct pt_regs;

 #ifdef CONFIG_STACKTRACE
 struct task_struct;
@@ -13,7 +14,8 @@ struct stack_trace {
 };

 extern void save_stack_trace(struct stack_trace *trace);
-extern void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp);
+extern void save_stack_trace_regs(struct stack_trace *trace,
+                                  struct pt_regs *regs);
 extern void save_stack_trace_tsk(struct task_struct *tsk,
                                 struct stack_trace *trace);
@@ -127,8 +127,6 @@ extern struct trace_event_functions exit_syscall_print_funcs;
 #define SYSCALL_TRACE_ENTER_EVENT(sname) \
         static struct syscall_metadata \
         __attribute__((__aligned__(4))) __syscall_meta_##sname; \
-        static struct ftrace_event_call \
-        __attribute__((__aligned__(4))) event_enter_##sname; \
         static struct ftrace_event_call __used \
         __attribute__((__aligned__(4))) \
         __attribute__((section("_ftrace_events"))) \
@@ -137,13 +135,12 @@ extern struct trace_event_functions exit_syscall_print_funcs;
           .class = &event_class_syscall_enter, \
           .event.funcs = &enter_syscall_print_funcs, \
           .data = (void *)&__syscall_meta_##sname,\
-        }
+        }; \
+        __TRACE_EVENT_FLAGS(enter_##sname, TRACE_EVENT_FL_CAP_ANY)

 #define SYSCALL_TRACE_EXIT_EVENT(sname) \
         static struct syscall_metadata \
         __attribute__((__aligned__(4))) __syscall_meta_##sname; \
-        static struct ftrace_event_call \
-        __attribute__((__aligned__(4))) event_exit_##sname; \
         static struct ftrace_event_call __used \
         __attribute__((__aligned__(4))) \
         __attribute__((section("_ftrace_events"))) \
@@ -152,7 +149,8 @@ extern struct trace_event_functions exit_syscall_print_funcs;
           .class = &event_class_syscall_exit, \
           .event.funcs = &exit_syscall_print_funcs, \
           .data = (void *)&__syscall_meta_##sname,\
-        }
+        }; \
+        __TRACE_EVENT_FLAGS(exit_##sname, TRACE_EVENT_FL_CAP_ANY)

 #define SYSCALL_METADATA(sname, nb) \
         SYSCALL_TRACE_ENTER_EVENT(sname); \
@@ -234,6 +234,8 @@ do_trace: \
                 PARAMS(void *__data, proto), \
                 PARAMS(__data, args))

+#define TRACE_EVENT_FLAGS(event, flag)
+
 #endif /* DECLARE_TRACE */

 #ifndef TRACE_EVENT
@@ -354,4 +356,6 @@ do_trace: \
                 assign, print, reg, unreg) \
         DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))

+#define TRACE_EVENT_FLAGS(event, flag)
+
 #endif /* ifdef TRACE_EVENT (see note above) */
@@ -40,6 +40,8 @@ TRACE_EVENT_FN(sys_enter,
         syscall_regfunc, syscall_unregfunc
 );

+TRACE_EVENT_FLAGS(sys_enter, TRACE_EVENT_FL_CAP_ANY)
+
 TRACE_EVENT_FN(sys_exit,

         TP_PROTO(struct pt_regs *regs, long ret),
@@ -62,6 +64,8 @@ TRACE_EVENT_FN(sys_exit,
         syscall_regfunc, syscall_unregfunc
 );

+TRACE_EVENT_FLAGS(sys_exit, TRACE_EVENT_FL_CAP_ANY)
+
 #endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */

 #endif /* _TRACE_EVENTS_SYSCALLS_H */
@@ -82,6 +82,10 @@
         TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \
                 PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \

+#undef TRACE_EVENT_FLAGS
+#define TRACE_EVENT_FLAGS(name, value) \
+        __TRACE_EVENT_FLAGS(name, value)
+
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


@@ -129,6 +133,9 @@
 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
         DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

+#undef TRACE_EVENT_FLAGS
+#define TRACE_EVENT_FLAGS(event, flag)
+
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

 /*
@@ -4773,15 +4773,6 @@ static int perf_tp_event_init(struct perf_event *event)
         if (event->attr.type != PERF_TYPE_TRACEPOINT)
                 return -ENOENT;

-        /*
-         * Raw tracepoint data is a severe data leak, only allow root to
-         * have these.
-         */
-        if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
-                        perf_paranoid_tracepoint_raw() &&
-                        !capable(CAP_SYS_ADMIN))
-                return -EPERM;
-
         err = perf_trace_init(event);
         if (err)
                 return err;
@@ -21,17 +21,46 @@ typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
 /* Count the events in use (per event id, not per instance) */
 static int total_ref_count;

+static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
+                                 struct perf_event *p_event)
+{
+        /* No tracing, just counting, so no obvious leak */
+        if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
+                return 0;
+
+        /* Some events are ok to be traced by non-root users... */
+        if (p_event->attach_state == PERF_ATTACH_TASK) {
+                if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
+                        return 0;
+        }
+
+        /*
+         * ...otherwise raw tracepoint data can be a severe data leak,
+         * only allow root to have these.
+         */
+        if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
+                return -EPERM;
+
+        return 0;
+}
+
 static int perf_trace_event_init(struct ftrace_event_call *tp_event,
                                  struct perf_event *p_event)
 {
         struct hlist_head __percpu *list;
-        int ret = -ENOMEM;
+        int ret;
         int cpu;

+        ret = perf_trace_event_perm(tp_event, p_event);
+        if (ret)
+                return ret;
+
         p_event->tp_event = tp_event;
         if (tp_event->perf_refcount++ > 0)
                 return 0;

+        ret = -ENOMEM;
+
         list = alloc_percpu(struct hlist_head);
         if (!list)
                 goto fail;
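perf_trace_event_perm() runs before any per-event setup: events that do not request PERF_SAMPLE_RAW pass unconditionally, per-task events on tracepoints flagged TRACE_EVENT_FL_CAP_ANY pass for unprivileged users, and everything else keeps the old rule of requiring CAP_SYS_ADMIN when perf_paranoid_tracepoint_raw() says so. A user-space sketch of what this enables for the newly flagged sys_enter/sys_exit events; open_self_sys_enter() is hypothetical and the tracepoint id is assumed to have been read from debugfs (e.g. events/raw_syscalls/sys_enter/id):

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Open a raw sys_enter tracepoint event on the calling task (pid 0, any CPU). */
static int open_self_sys_enter(uint64_t tracepoint_id)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.size = sizeof(attr);
        attr.config = tracepoint_id;
        attr.sample_type = PERF_SAMPLE_RAW;
        attr.sample_period = 1;

        /* perf_event_open has no glibc wrapper; invoke the raw syscall. */
        return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}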