Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (172 commits)
  perf_event, amd: Fix spinlock initialization
  perf_event: Fix preempt warning in perf_clock()
  perf tools: Flush maps on COMM events
  perf_events, x86: Split PMU definitions into separate files
  perf annotate: Handle samples not at objdump output addr boundaries
  perf_events, x86: Remove superflous MSR writes
  perf_events: Simplify code by removing cpu argument to hw_perf_group_sched_in()
  perf_events, x86: AMD event scheduling
  perf_events: Add new start/stop PMU callbacks
  perf_events: Report the MMAP pgoff value in bytes
  perf annotate: Defer allocating sym_priv->hist array
  perf symbols: Improve debugging information about symtab origins
  perf top: Use a macro instead of a constant variable
  perf symbols: Check the right return variable
  perf/scripts: Tag syscall_name helper as not yet available
  perf/scripts: Add perf-trace-python Documentation
  perf/scripts: Remove unnecessary PyTuple resizes
  perf/scripts: Add syscall tracing scripts
  perf/scripts: Add Python scripting engine
  perf/scripts: Remove check-perf-trace from listed scripts
  ...

Fix trivial conflict in tools/perf/util/probe-event.c
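The tracing-side refactor that most of the hunks below share: each probe and syscall profile handler used to open-code the perf buffer setup (disable irqs, take a recursion context, pick the NMI or non-NMI per-cpu buffer, zero the alignment padding), and that sequence now lives behind ftrace_perf_buf_prepare()/ftrace_perf_buf_submit(). A minimal sketch of the resulting handler shape, assembled from the hunks below; the entry type and fields are the kprobe handler's, so treat this as illustrative rather than a complete handler:

	struct kprobe_trace_entry *entry;
	unsigned long irq_flags;
	int rctx;

	/* irqs off + recursion context + per-cpu buffer, in one call */
	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
	if (!entry)
		return;

	/* fill in the event-specific payload between prepare and submit */
	entry->nargs = tp->nr_args;
	entry->ip = (unsigned long)kp->addr;

	/* hands the record to perf and unwinds prepare's irq/recursion state */
	ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags);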
@@ -51,7 +51,9 @@ endif
 obj-$(CONFIG_EVENT_TRACING) += trace_events.o
 obj-$(CONFIG_EVENT_TRACING) += trace_export.o
 obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
-obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o
+ifeq ($(CONFIG_PERF_EVENTS),y)
+obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o
+endif
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
 obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o
@@ -22,7 +22,6 @@
 #include <linux/hardirq.h>
 #include <linux/kthread.h>
 #include <linux/uaccess.h>
-#include <linux/kprobes.h>
 #include <linux/ftrace.h>
 #include <linux/sysctl.h>
 #include <linux/ctype.h>
@@ -898,36 +897,6 @@ static struct dyn_ftrace *ftrace_free_records;
 	}				\
 	}
 
-#ifdef CONFIG_KPROBES
-
-static int frozen_record_count;
-
-static inline void freeze_record(struct dyn_ftrace *rec)
-{
-	if (!(rec->flags & FTRACE_FL_FROZEN)) {
-		rec->flags |= FTRACE_FL_FROZEN;
-		frozen_record_count++;
-	}
-}
-
-static inline void unfreeze_record(struct dyn_ftrace *rec)
-{
-	if (rec->flags & FTRACE_FL_FROZEN) {
-		rec->flags &= ~FTRACE_FL_FROZEN;
-		frozen_record_count--;
-	}
-}
-
-static inline int record_frozen(struct dyn_ftrace *rec)
-{
-	return rec->flags & FTRACE_FL_FROZEN;
-}
-#else
-# define freeze_record(rec)	({ 0; })
-# define unfreeze_record(rec)	({ 0; })
-# define record_frozen(rec)	({ 0; })
-#endif /* CONFIG_KPROBES */
-
 static void ftrace_free_rec(struct dyn_ftrace *rec)
 {
 	rec->freelist = ftrace_free_records;
@@ -1025,6 +994,21 @@ static void ftrace_bug(int failed, unsigned long ip)
 }
 
 
+/* Return 1 if the address range is reserved for ftrace */
+int ftrace_text_reserved(void *start, void *end)
+{
+	struct dyn_ftrace *rec;
+	struct ftrace_page *pg;
+
+	do_for_each_ftrace_rec(pg, rec) {
+		if (rec->ip <= (unsigned long)end &&
+		    rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
+			return 1;
+	} while_for_each_ftrace_rec();
+	return 0;
+}
+
+
 static int
 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
@@ -1076,14 +1060,6 @@ static void ftrace_replace_code(int enable)
 		    !(rec->flags & FTRACE_FL_CONVERTED))
 			continue;
 
-		/* ignore updates to this record's mcount site */
-		if (get_kprobe((void *)rec->ip)) {
-			freeze_record(rec);
-			continue;
-		} else {
-			unfreeze_record(rec);
-		}
-
 		failed = __ftrace_replace_code(rec, enable);
 		if (failed) {
 			rec->flags |= FTRACE_FL_FAILED;
@@ -6,14 +6,12 @@
  */
 
 #include <linux/module.h>
+#include <linux/kprobes.h>
 #include "trace.h"
 
 
-char *perf_trace_buf;
-EXPORT_SYMBOL_GPL(perf_trace_buf);
-
-char *perf_trace_buf_nmi;
-EXPORT_SYMBOL_GPL(perf_trace_buf_nmi);
+static char *perf_trace_buf;
+static char *perf_trace_buf_nmi;
 
 typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ;
 
@@ -120,3 +118,47 @@ void ftrace_profile_disable(int event_id)
 	}
 	mutex_unlock(&event_mutex);
 }
+
+__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
+					int *rctxp, unsigned long *irq_flags)
+{
+	struct trace_entry *entry;
+	char *trace_buf, *raw_data;
+	int pc, cpu;
+
+	pc = preempt_count();
+
+	/* Protect the per cpu buffer, begin the rcu read side */
+	local_irq_save(*irq_flags);
+
+	*rctxp = perf_swevent_get_recursion_context();
+	if (*rctxp < 0)
+		goto err_recursion;
+
+	cpu = smp_processor_id();
+
+	if (in_nmi())
+		trace_buf = rcu_dereference(perf_trace_buf_nmi);
+	else
+		trace_buf = rcu_dereference(perf_trace_buf);
+
+	if (!trace_buf)
+		goto err;
+
+	raw_data = per_cpu_ptr(trace_buf, cpu);
+
+	/* zero the dead bytes from align to not leak stack to user */
+	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+
+	entry = (struct trace_entry *)raw_data;
+	tracing_generic_entry_update(entry, *irq_flags, pc);
+	entry->type = type;
+
+	return raw_data;
+err:
+	perf_swevent_put_recursion_context(*rctxp);
+err_recursion:
+	local_irq_restore(*irq_flags);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);
@@ -1371,7 +1371,7 @@ out_unlock:
 	return err;
 }
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
 void ftrace_profile_free_filter(struct perf_event *event)
 {
@@ -1439,5 +1439,5 @@ out_unlock:
 	return err;
 }
 
-#endif /* CONFIG_EVENT_PROFILE */
+#endif /* CONFIG_PERF_EVENTS */
 
@@ -91,11 +91,6 @@ static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
 	return retval;
 }
 
-static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num)
-{
-	return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num));
-}
-
 static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
 					      void *dummy)
 {
@@ -231,9 +226,7 @@ static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
 {
 	int ret = -EINVAL;
 
-	if (ff->func == fetch_argument)
-		ret = snprintf(buf, n, "$arg%lu", (unsigned long)ff->data);
-	else if (ff->func == fetch_register) {
+	if (ff->func == fetch_register) {
 		const char *name;
 		name = regs_query_register_name((unsigned int)((long)ff->data));
 		ret = snprintf(buf, n, "%%%s", name);
@@ -489,14 +482,6 @@ static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
 		}
 	} else
 		ret = -EINVAL;
-	} else if (strncmp(arg, "arg", 3) == 0 && isdigit(arg[3])) {
-		ret = strict_strtoul(arg + 3, 10, &param);
-		if (ret || param > PARAM_MAX_ARGS)
-			ret = -EINVAL;
-		else {
-			ff->func = fetch_argument;
-			ff->data = (void *)param;
-		}
 	} else
 		ret = -EINVAL;
 	return ret;
@@ -611,7 +596,6 @@ static int create_trace_probe(int argc, char **argv)
  *  - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
  *  - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
  * Fetch args:
- *  $argN	: fetch Nth of function argument. (N:0-)
 *  $retval	: fetch return value
 *  $stack	: fetch stack address
 *  $stackN	: fetch Nth of stack (N:0-)
@@ -958,7 +942,7 @@ static const struct file_operations kprobe_profile_ops = {
 };
 
 /* Kprobe handler */
-static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
+static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 	struct kprobe_trace_entry *entry;
@@ -978,7 +962,7 @@ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
 						  irq_flags, pc);
 	if (!event)
-		return 0;
+		return;
 
 	entry = ring_buffer_event_data(event);
 	entry->nargs = tp->nr_args;
@@ -988,11 +972,10 @@ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 
 	if (!filter_current_check_discard(buffer, call, entry, event))
 		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
-	return 0;
 }
 
 /* Kretprobe handler */
-static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
+static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
 					  struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
@@ -1011,7 +994,7 @@ static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
 	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
 						  irq_flags, pc);
 	if (!event)
-		return 0;
+		return;
 
 	entry = ring_buffer_event_data(event);
 	entry->nargs = tp->nr_args;
@@ -1022,8 +1005,6 @@ static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
 
 	if (!filter_current_check_discard(buffer, call, entry, event))
 		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
-
-	return 0;
 }
 
 /* Event entry printers */
@@ -1230,137 +1211,67 @@ static int set_print_fmt(struct trace_probe *tp)
 	return 0;
 }
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static __kprobes int kprobe_profile_func(struct kprobe *kp,
+static __kprobes void kprobe_profile_func(struct kprobe *kp,
 					 struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kprobe_trace_entry *entry;
-	struct trace_entry *ent;
-	int size, __size, i, pc, __cpu;
+	int size, __size, i;
 	unsigned long irq_flags;
-	char *trace_buf;
-	char *raw_data;
 	int rctx;
 
-	pc = preempt_count();
 	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
 	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
 		     "profile buffer not large enough"))
-		return 0;
+		return;
 
-	/*
-	 * Protect the non nmi buffer
-	 * This also protects the rcu read side
-	 */
-	local_irq_save(irq_flags);
+	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+	if (!entry)
+		return;
 
-	rctx = perf_swevent_get_recursion_context();
-	if (rctx < 0)
-		goto end_recursion;
-
-	__cpu = smp_processor_id();
-
-	if (in_nmi())
-		trace_buf = rcu_dereference(perf_trace_buf_nmi);
-	else
-		trace_buf = rcu_dereference(perf_trace_buf);
-
-	if (!trace_buf)
-		goto end;
-
-	raw_data = per_cpu_ptr(trace_buf, __cpu);
-
-	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
-	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-	entry = (struct kprobe_trace_entry *)raw_data;
-	ent = &entry->ent;
-
-	tracing_generic_entry_update(ent, irq_flags, pc);
-	ent->type = call->id;
 	entry->nargs = tp->nr_args;
 	entry->ip = (unsigned long)kp->addr;
 	for (i = 0; i < tp->nr_args; i++)
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
-	perf_tp_event(call->id, entry->ip, 1, entry, size);
-
-end:
-	perf_swevent_put_recursion_context(rctx);
-end_recursion:
-	local_irq_restore(irq_flags);
-
-	return 0;
+	ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags);
 }
 
 /* Kretprobe profile handler */
-static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
+static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
 					    struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kretprobe_trace_entry *entry;
-	struct trace_entry *ent;
-	int size, __size, i, pc, __cpu;
+	int size, __size, i;
 	unsigned long irq_flags;
-	char *trace_buf;
-	char *raw_data;
 	int rctx;
 
-	pc = preempt_count();
 	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
 	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
 		     "profile buffer not large enough"))
-		return 0;
+		return;
 
-	/*
-	 * Protect the non nmi buffer
-	 * This also protects the rcu read side
-	 */
-	local_irq_save(irq_flags);
+	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+	if (!entry)
+		return;
 
-	rctx = perf_swevent_get_recursion_context();
-	if (rctx < 0)
-		goto end_recursion;
-
-	__cpu = smp_processor_id();
-
-	if (in_nmi())
-		trace_buf = rcu_dereference(perf_trace_buf_nmi);
-	else
-		trace_buf = rcu_dereference(perf_trace_buf);
-
-	if (!trace_buf)
-		goto end;
-
-	raw_data = per_cpu_ptr(trace_buf, __cpu);
-
-	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
-	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-	entry = (struct kretprobe_trace_entry *)raw_data;
-	ent = &entry->ent;
-
-	tracing_generic_entry_update(ent, irq_flags, pc);
-	ent->type = call->id;
 	entry->nargs = tp->nr_args;
 	entry->func = (unsigned long)tp->rp.kp.addr;
 	entry->ret_ip = (unsigned long)ri->ret_addr;
 	for (i = 0; i < tp->nr_args; i++)
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
-	perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
-
-end:
-	perf_swevent_put_recursion_context(rctx);
-end_recursion:
-	local_irq_restore(irq_flags);
-
-	return 0;
+	ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags);
 }
 
 static int probe_profile_enable(struct ftrace_event_call *call)
@@ -1388,7 +1299,7 @@ static void probe_profile_disable(struct ftrace_event_call *call)
 			disable_kprobe(&tp->rp.kp);
 	}
 }
-#endif /* CONFIG_EVENT_PROFILE */
+#endif /* CONFIG_PERF_EVENTS */
 
 
 static __kprobes
@@ -1398,10 +1309,10 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 
 	if (tp->flags & TP_FLAG_TRACE)
 		kprobe_trace_func(kp, regs);
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
 		kprobe_profile_func(kp, regs);
-#endif /* CONFIG_EVENT_PROFILE */
+#endif
 	return 0;	/* We don't tweek kernel, so just return 0 */
 }
 
@@ -1412,10 +1323,10 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
 
 	if (tp->flags & TP_FLAG_TRACE)
 		kretprobe_trace_func(ri, regs);
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
 		kretprobe_profile_func(ri, regs);
-#endif /* CONFIG_EVENT_PROFILE */
+#endif
 	return 0;	/* We don't tweek kernel, so just return 0 */
 }
 
@@ -1446,7 +1357,7 @@ static int register_probe_event(struct trace_probe *tp)
 	call->regfunc = probe_event_enable;
 	call->unregfunc = probe_event_disable;
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 	call->profile_enable = probe_profile_enable;
 	call->profile_disable = probe_profile_disable;
 #endif
@@ -1507,28 +1418,67 @@ static int kprobe_trace_selftest_target(int a1, int a2, int a3,
 
 static __init int kprobe_trace_self_tests_init(void)
 {
-	int ret;
+	int ret, warn = 0;
 	int (*target)(int, int, int, int, int, int);
+	struct trace_probe *tp;
 
 	target = kprobe_trace_selftest_target;
 
 	pr_info("Testing kprobe tracing: ");
 
 	ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
-				  "$arg1 $arg2 $arg3 $arg4 $stack $stack0");
-	if (WARN_ON_ONCE(ret))
-		pr_warning("error enabling function entry\n");
+				  "$stack $stack0 +0($stack)");
+	if (WARN_ON_ONCE(ret)) {
+		pr_warning("error on probing function entry.\n");
+		warn++;
+	} else {
+		/* Enable trace point */
+		tp = find_probe_event("testprobe", KPROBE_EVENT_SYSTEM);
+		if (WARN_ON_ONCE(tp == NULL)) {
+			pr_warning("error on getting new probe.\n");
+			warn++;
+		} else
+			probe_event_enable(&tp->call);
+	}
 
 	ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
 				  "$retval");
-	if (WARN_ON_ONCE(ret))
-		pr_warning("error enabling function return\n");
+	if (WARN_ON_ONCE(ret)) {
+		pr_warning("error on probing function return.\n");
+		warn++;
+	} else {
+		/* Enable trace point */
+		tp = find_probe_event("testprobe2", KPROBE_EVENT_SYSTEM);
+		if (WARN_ON_ONCE(tp == NULL)) {
+			pr_warning("error on getting new probe.\n");
+			warn++;
+		} else
+			probe_event_enable(&tp->call);
+	}
+
+	if (warn)
+		goto end;
 
 	ret = target(1, 2, 3, 4, 5, 6);
 
-	cleanup_all_probes();
+	ret = command_trace_probe("-:testprobe");
+	if (WARN_ON_ONCE(ret)) {
+		pr_warning("error on deleting a probe.\n");
+		warn++;
+	}
 
-	pr_cont("OK\n");
+	ret = command_trace_probe("-:testprobe2");
+	if (WARN_ON_ONCE(ret)) {
+		pr_warning("error on deleting a probe.\n");
+		warn++;
+	}
+
+end:
+	cleanup_all_probes();
+	if (warn)
+		pr_cont("NG: Some tests are failed. Please check them.\n");
+	else
+		pr_cont("OK\n");
 	return 0;
 }
 
@@ -426,7 +426,7 @@ int __init init_ftrace_syscalls(void)
 }
 core_initcall(init_ftrace_syscalls);
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
 static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
 static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
@@ -438,12 +438,9 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	struct syscall_metadata *sys_data;
 	struct syscall_trace_enter *rec;
 	unsigned long flags;
-	char *trace_buf;
-	char *raw_data;
 	int syscall_nr;
 	int rctx;
 	int size;
-	int cpu;
 
 	syscall_nr = syscall_get_nr(current, regs);
 	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
@@ -462,37 +459,15 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 		      "profile buffer not large enough"))
 		return;
 
-	/* Protect the per cpu buffer, begin the rcu read side */
-	local_irq_save(flags);
+	rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size,
+				sys_data->enter_event->id, &rctx, &flags);
+	if (!rec)
+		return;
 
-	rctx = perf_swevent_get_recursion_context();
-	if (rctx < 0)
-		goto end_recursion;
-
-	cpu = smp_processor_id();
-
-	trace_buf = rcu_dereference(perf_trace_buf);
-
-	if (!trace_buf)
-		goto end;
-
-	raw_data = per_cpu_ptr(trace_buf, cpu);
-
-	/* zero the dead bytes from align to not leak stack to user */
-	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-
-	rec = (struct syscall_trace_enter *) raw_data;
-	tracing_generic_entry_update(&rec->ent, 0, 0);
-	rec->ent.type = sys_data->enter_event->id;
 	rec->nr = syscall_nr;
 	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
 			       (unsigned long *)&rec->args);
-	perf_tp_event(sys_data->enter_event->id, 0, 1, rec, size);
-
-end:
-	perf_swevent_put_recursion_context(rctx);
-end_recursion:
-	local_irq_restore(flags);
+	ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags);
 }
 
 int prof_sysenter_enable(struct ftrace_event_call *call)
@@ -536,11 +511,8 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	struct syscall_trace_exit *rec;
 	unsigned long flags;
 	int syscall_nr;
-	char *trace_buf;
-	char *raw_data;
 	int rctx;
 	int size;
-	int cpu;
 
 	syscall_nr = syscall_get_nr(current, regs);
 	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
@@ -562,38 +534,15 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 		      "exit event has grown above profile buffer size"))
 		return;
 
-	/* Protect the per cpu buffer, begin the rcu read side */
-	local_irq_save(flags);
+	rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size,
+				sys_data->exit_event->id, &rctx, &flags);
+	if (!rec)
+		return;
 
-	rctx = perf_swevent_get_recursion_context();
-	if (rctx < 0)
-		goto end_recursion;
-
-	cpu = smp_processor_id();
-
-	trace_buf = rcu_dereference(perf_trace_buf);
-
-	if (!trace_buf)
-		goto end;
-
-	raw_data = per_cpu_ptr(trace_buf, cpu);
-
-	/* zero the dead bytes from align to not leak stack to user */
-	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-
-	rec = (struct syscall_trace_exit *)raw_data;
-
-	tracing_generic_entry_update(&rec->ent, 0, 0);
-	rec->ent.type = sys_data->exit_event->id;
 	rec->nr = syscall_nr;
 	rec->ret = syscall_get_return_value(current, regs);
-
-	perf_tp_event(sys_data->exit_event->id, 0, 1, rec, size);
-
-end:
-	perf_swevent_put_recursion_context(rctx);
-end_recursion:
-	local_irq_restore(flags);
+	ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags);
 }
 
 int prof_sysexit_enable(struct ftrace_event_call *call)
@@ -631,6 +580,5 @@ void prof_sysexit_disable(struct ftrace_event_call *call)
 	mutex_unlock(&syscall_trace_lock);
 }
 
-#endif
-
+#endif /* CONFIG_PERF_EVENTS */
 