Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (35 commits)
  perf: Fix unexported generic perf_arch_fetch_caller_regs
  perf record: Don't try to find buildids in a zero sized file
  perf: export perf_trace_regs and perf_arch_fetch_caller_regs
  perf, x86: Fix hw_perf_enable() event assignment
  perf, ppc: Fix compile error due to new cpu notifiers
  perf: Make the install relative to DESTDIR if specified
  kprobes: Calculate the index correctly when freeing the out-of-line execution slot
  perf tools: Fix sparse CPU numbering related bugs
  perf_event: Fix oops triggered by cpu offline/online
  perf: Drop the obsolete profile naming for trace events
  perf: Take a hot regs snapshot for trace events
  perf: Introduce new perf_fetch_caller_regs() for hot regs snapshot
  perf/x86-64: Use frame pointer to walk on irq and process stacks
  lockdep: Move lock events under lockdep recursion protection
  perf report: Print the map table just after samples for which no map was found
  perf report: Add multiple event support
  perf session: Change perf_session post processing functions to take histogram tree
  perf session: Add storage for separating event types in report
  perf session: Change add_hist_entry to take the tree root instead of session
  perf record: Add ID and to recorded event data when recording multiple events
  ...
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -259,7 +259,8 @@ static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
 	struct kprobe_insn_page *kip;
 
 	list_for_each_entry(kip, &c->pages, list) {
-		long idx = ((long)slot - (long)kip->insns) / c->insn_size;
+		long idx = ((long)slot - (long)kip->insns) /
+			   (c->insn_size * sizeof(kprobe_opcode_t));
 		if (idx >= 0 && idx < slots_per_page(c)) {
 			WARN_ON(kip->slot_used[idx] != SLOT_USED);
 			if (dirty) {
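The hunk above divides a byte offset by a count of opcodes; since kip->insns is a kprobe_opcode_t pointer, the divisor must be scaled to bytes. A minimal userspace sketch of the arithmetic (hypothetical sizes and names, not the kernel structures):

#include <assert.h>
#include <stdio.h>

typedef unsigned int opcode_t;	/* stand-in; kprobe_opcode_t is arch-defined */

#define INSN_SIZE 16		/* opcodes per slot (hypothetical) */
#define SLOTS 8

int main(void)
{
	opcode_t insns[SLOTS * INSN_SIZE];
	opcode_t *slot = &insns[3 * INSN_SIZE];	/* start of slot 3 */

	/* Wrong: divides a byte offset by an opcode count. With
	 * 4-byte opcodes this yields 12, not 3. */
	long bad = ((long)slot - (long)insns) / INSN_SIZE;

	/* Right: scale the divisor to bytes, as the fix does. */
	long idx = ((long)slot - (long)insns) /
		   (INSN_SIZE * sizeof(opcode_t));

	assert(bad == 12 && idx == 3);
	printf("bad index = %ld, fixed index = %ld\n", bad, idx);
	return 0;
}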
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3211,8 +3211,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 {
 	unsigned long flags;
 
-	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
-
 	if (unlikely(current->lockdep_recursion))
 		return;
 
@@ -3220,6 +3218,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	check_flags(flags);
 
 	current->lockdep_recursion = 1;
+	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
 	__lock_acquire(lock, subclass, trylock, read, check,
 		       irqs_disabled_flags(flags), nest_lock, ip, 0);
 	current->lockdep_recursion = 0;
@@ -3232,14 +3231,13 @@ void lock_release(struct lockdep_map *lock, int nested,
 {
 	unsigned long flags;
 
-	trace_lock_release(lock, nested, ip);
-
 	if (unlikely(current->lockdep_recursion))
 		return;
 
 	raw_local_irq_save(flags);
 	check_flags(flags);
 	current->lockdep_recursion = 1;
+	trace_lock_release(lock, nested, ip);
 	__lock_release(lock, nested, ip);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
@@ -3413,8 +3411,6 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
-	trace_lock_contended(lock, ip);
-
 	if (unlikely(!lock_stat))
 		return;
 
@@ -3424,6 +3420,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 	raw_local_irq_save(flags);
 	check_flags(flags);
 	current->lockdep_recursion = 1;
+	trace_lock_contended(lock, ip);
 	__lock_contended(lock, ip);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
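These lockdep hunks move each trace_lock_* tracepoint inside the lockdep_recursion window, so a tracer that itself takes locks cannot re-enter lockdep. A hedged single-threaded sketch of the guard pattern, with hypothetical names (the kernel keeps the flag in current->lockdep_recursion):

#include <stdio.h>

static __thread int in_lock_instrumentation;	/* models the per-task flag */

static void emit_trace_event(const char *what)
{
	/* Imagine this handler acquires a lock of its own; without the
	 * guard it would loop back into the instrumented path. */
	printf("trace: %s\n", what);
}

static void instrumented_lock_acquire(const char *name)
{
	if (in_lock_instrumentation)
		return;			/* already inside: bail out */

	in_lock_instrumentation = 1;
	/* The tracepoint now fires *inside* the guard, so anything it
	 * does that re-enters this function returns immediately. */
	emit_trace_event(name);
	/* ... real bookkeeping (__lock_acquire) would go here ... */
	in_lock_instrumentation = 0;
}

int main(void)
{
	instrumented_lock_acquire("my_lock");
	return 0;
}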
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -81,10 +81,6 @@ extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
 void __weak hw_perf_disable(void)		{ barrier(); }
 void __weak hw_perf_enable(void)		{ barrier(); }
 
-void __weak hw_perf_event_setup(int cpu)	{ barrier(); }
-void __weak hw_perf_event_setup_online(int cpu)	{ barrier(); }
-void __weak hw_perf_event_setup_offline(int cpu)	{ barrier(); }
-
 int __weak
 hw_perf_group_sched_in(struct perf_event *group_leader,
 	       struct perf_cpu_context *cpuctx,
@@ -97,25 +93,15 @@ void __weak perf_event_print_debug(void)	{ }
 
 static DEFINE_PER_CPU(int, perf_disable_count);
 
-void __perf_disable(void)
-{
-	__get_cpu_var(perf_disable_count)++;
-}
-
-bool __perf_enable(void)
-{
-	return !--__get_cpu_var(perf_disable_count);
-}
-
 void perf_disable(void)
 {
-	__perf_disable();
-	hw_perf_disable();
+	if (!__get_cpu_var(perf_disable_count)++)
+		hw_perf_disable();
 }
 
 void perf_enable(void)
 {
-	if (__perf_enable())
+	if (!--__get_cpu_var(perf_disable_count))
 		hw_perf_enable();
 }
 
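The rework above turns perf_disable()/perf_enable() into a nesting counter so the hardware is touched only on the outermost pair. A small single-threaded model of that refcount idiom (hypothetical names, not the kernel API):

#include <assert.h>
#include <stdio.h>

static int disable_count;		/* models the per-CPU counter */
static int hw_disabled;			/* models PMU state */

static void hw_disable(void) { hw_disabled = 1; puts("PMU off"); }
static void hw_enable(void)  { hw_disabled = 0; puts("PMU on"); }

static void pmu_disable(void)
{
	/* Only the 0 -> 1 transition touches the hardware. */
	if (!disable_count++)
		hw_disable();
}

static void pmu_enable(void)
{
	/* Only the 1 -> 0 transition touches the hardware. */
	if (!--disable_count)
		hw_enable();
}

int main(void)
{
	pmu_disable();		/* PMU off */
	pmu_disable();		/* nested: no hardware access */
	pmu_enable();		/* still disabled */
	assert(hw_disabled);
	pmu_enable();		/* outermost: PMU on */
	assert(!hw_disabled);
	return 0;
}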
@@ -1538,12 +1524,15 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 		 */
 		if (interrupts == MAX_INTERRUPTS) {
 			perf_log_throttle(event, 1);
+			perf_disable();
 			event->pmu->unthrottle(event);
+			perf_enable();
 		}
 
 		if (!event->attr.freq || !event->attr.sample_freq)
 			continue;
 
+		perf_disable();
 		event->pmu->read(event);
 		now = atomic64_read(&event->count);
 		delta = now - hwc->freq_count_stamp;
@@ -1551,6 +1540,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 
 		if (delta > 0)
 			perf_adjust_period(event, TICK_NSEC, delta);
+		perf_enable();
 	}
 	raw_spin_unlock(&ctx->lock);
 }
@@ -1560,9 +1550,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
  */
 static void rotate_ctx(struct perf_event_context *ctx)
 {
-	if (!ctx->nr_events)
-		return;
-
 	raw_spin_lock(&ctx->lock);
 
 	/* Rotate the first entry last of non-pinned groups */
@@ -1575,19 +1562,28 @@ void perf_event_task_tick(struct task_struct *curr)
 {
 	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
+	int rotate = 0;
 
 	if (!atomic_read(&nr_events))
 		return;
 
 	cpuctx = &__get_cpu_var(perf_cpu_context);
-	ctx = curr->perf_event_ctxp;
+	if (cpuctx->ctx.nr_events &&
+	    cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
+		rotate = 1;
 
-	perf_disable();
+	ctx = curr->perf_event_ctxp;
+	if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
+		rotate = 1;
 
 	perf_ctx_adjust_freq(&cpuctx->ctx);
 	if (ctx)
 		perf_ctx_adjust_freq(ctx);
 
+	if (!rotate)
+		return;
+
+	perf_disable();
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
 		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
@@ -1599,7 +1595,6 @@ void perf_event_task_tick(struct task_struct *curr)
 
 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
 		task_ctx_sched_in(curr, EVENT_FLEXIBLE);
-
 	perf_enable();
 }
@@ -2791,6 +2786,13 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	return NULL;
 }
 
+#ifdef CONFIG_EVENT_TRACING
+__weak
+void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
+{
+}
+#endif
+
 /*
  * Output
  */
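perf_arch_fetch_caller_regs is defined __weak so an architecture can replace the empty generic stub with a real register snapshot. The same linker mechanism works in userspace GCC/Clang; a minimal sketch with a hypothetical function name:

#include <stdio.h>

/* Weak default, like the generic perf_arch_fetch_caller_regs() stub:
 * it does nothing useful and exists only so the core code links. */
__attribute__((weak))
void arch_fetch_regs(void)
{
	puts("generic stub: no registers captured");
}

int main(void)
{
	/* If another object file in the link defines a non-weak
	 * arch_fetch_regs(), the linker picks that one and this stub
	 * is discarded; otherwise the stub runs. */
	arch_fetch_regs();
	return 0;
}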
@@ -4318,9 +4320,8 @@ static const struct pmu perf_ops_task_clock = {
 #ifdef CONFIG_EVENT_TRACING
 
 void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
-		   int entry_size)
+		   int entry_size, struct pt_regs *regs)
 {
-	struct pt_regs *regs = get_irq_regs();
 	struct perf_sample_data data;
 	struct perf_raw_record raw = {
 		.size = entry_size,
@@ -4330,12 +4331,9 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
 	perf_sample_data_init(&data, addr);
 	data.raw = &raw;
 
-	if (!regs)
-		regs = task_pt_regs(current);
-
 	/* Trace events already protected against recursion */
 	do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
 			 &data, regs);
 }
 EXPORT_SYMBOL_GPL(perf_tp_event);
 
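The signature change above makes the tracepoint site capture the pt_regs snapshot and pass it down, instead of perf_tp_event() guessing with get_irq_regs()/task_pt_regs(); samples then point at the code that actually fired the event. A plain-C sketch of that "capture context at the caller" refactor, with hypothetical names:

#include <stdio.h>

struct regs { unsigned long ip; };

/* After the change: the consumer receives the context instead of
 * re-deriving it from a possibly stale source. */
static void emit_sample(int event_id, const struct regs *regs)
{
	printf("event %d sampled at ip=%#lx\n", event_id, regs->ip);
}

static void tracepoint_fires(int event_id)
{
	struct regs snapshot = { .ip = 0x1234 };	/* captured here, hot */

	/* Analogous to perf_fetch_caller_regs() feeding perf_tp_event():
	 * the snapshot travels with the event down the call chain. */
	emit_sample(event_id, &snapshot);
}

int main(void)
{
	tracepoint_fires(42);
	return 0;
}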
@@ -4351,7 +4349,7 @@ static int perf_tp_event_match(struct perf_event *event,
 
 static void tp_perf_event_destroy(struct perf_event *event)
 {
-	ftrace_profile_disable(event->attr.config);
+	perf_trace_disable(event->attr.config);
 }
 
 static const struct pmu *tp_perf_event_init(struct perf_event *event)
@@ -4365,7 +4363,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
 	    !capable(CAP_SYS_ADMIN))
 		return ERR_PTR(-EPERM);
 
-	if (ftrace_profile_enable(event->attr.config))
+	if (perf_trace_enable(event->attr.config))
 		return NULL;
 
 	event->destroy = tp_perf_event_destroy;
@@ -5372,18 +5370,26 @@ int perf_event_init_task(struct task_struct *child)
 	return ret;
 }
 
+static void __init perf_event_init_all_cpus(void)
+{
+	int cpu;
+	struct perf_cpu_context *cpuctx;
+
+	for_each_possible_cpu(cpu) {
+		cpuctx = &per_cpu(perf_cpu_context, cpu);
+		__perf_event_init_context(&cpuctx->ctx, NULL);
+	}
+}
+
 static void __cpuinit perf_event_init_cpu(int cpu)
 {
 	struct perf_cpu_context *cpuctx;
 
 	cpuctx = &per_cpu(perf_cpu_context, cpu);
-	__perf_event_init_context(&cpuctx->ctx, NULL);
 
 	spin_lock(&perf_resource_lock);
 	cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
 	spin_unlock(&perf_resource_lock);
-
-	hw_perf_event_setup(cpu);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -5423,20 +5429,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 		perf_event_init_cpu(cpu);
 		break;
 
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		hw_perf_event_setup_online(cpu);
-		break;
-
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		perf_event_exit_cpu(cpu);
 		break;
 
-	case CPU_DEAD:
-		hw_perf_event_setup_offline(cpu);
-		break;
-
 	default:
 		break;
 	}
@@ -5454,6 +5451,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
 
 void __init perf_event_init(void)
 {
+	perf_event_init_all_cpus();
 	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
 			(void *)(long)smp_processor_id());
 	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -52,7 +52,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events.o
 obj-$(CONFIG_EVENT_TRACING) += trace_export.o
 obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
 ifeq ($(CONFIG_PERF_EVENTS),y)
-obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o
+obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o
 endif
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_perf.c
@@ -1,32 +1,36 @@
 /*
- * trace event based perf counter profiling
+ * trace event based perf event profiling/tracing
  *
  * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
+ *
+ * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */
 
 #include <linux/module.h>
 #include <linux/kprobes.h>
 #include "trace.h"
 
+DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
+EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs);
+
+EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
+
 static char *perf_trace_buf;
 static char *perf_trace_buf_nmi;
 
-typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ;
+typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ;
 
 /* Count the events in use (per event id, not per instance) */
-static int total_profile_count;
+static int total_ref_count;
 
-static int ftrace_profile_enable_event(struct ftrace_event_call *event)
+static int perf_trace_event_enable(struct ftrace_event_call *event)
 {
 	char *buf;
 	int ret = -ENOMEM;
 
-	if (event->profile_count++ > 0)
+	if (event->perf_refcount++ > 0)
 		return 0;
 
-	if (!total_profile_count) {
+	if (!total_ref_count) {
 		buf = (char *)alloc_percpu(perf_trace_t);
 		if (!buf)
 			goto fail_buf;
@@ -40,35 +44,35 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
 		rcu_assign_pointer(perf_trace_buf_nmi, buf);
 	}
 
-	ret = event->profile_enable(event);
+	ret = event->perf_event_enable(event);
 	if (!ret) {
-		total_profile_count++;
+		total_ref_count++;
 		return 0;
 	}
 
 fail_buf_nmi:
-	if (!total_profile_count) {
+	if (!total_ref_count) {
 		free_percpu(perf_trace_buf_nmi);
 		free_percpu(perf_trace_buf);
 		perf_trace_buf_nmi = NULL;
 		perf_trace_buf = NULL;
 	}
 fail_buf:
-	event->profile_count--;
+	event->perf_refcount--;
 
 	return ret;
 }
 
-int ftrace_profile_enable(int event_id)
+int perf_trace_enable(int event_id)
 {
 	struct ftrace_event_call *event;
 	int ret = -EINVAL;
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
-		if (event->id == event_id && event->profile_enable &&
+		if (event->id == event_id && event->perf_event_enable &&
 		    try_module_get(event->mod)) {
-			ret = ftrace_profile_enable_event(event);
+			ret = perf_trace_event_enable(event);
 			break;
 		}
 	}
@@ -77,16 +81,16 @@ int ftrace_profile_enable(int event_id)
 	return ret;
 }
 
-static void ftrace_profile_disable_event(struct ftrace_event_call *event)
+static void perf_trace_event_disable(struct ftrace_event_call *event)
 {
 	char *buf, *nmi_buf;
 
-	if (--event->profile_count > 0)
+	if (--event->perf_refcount > 0)
 		return;
 
-	event->profile_disable(event);
+	event->perf_event_disable(event);
 
-	if (!--total_profile_count) {
+	if (!--total_ref_count) {
 		buf = perf_trace_buf;
 		rcu_assign_pointer(perf_trace_buf, NULL);
 
@@ -104,14 +108,14 @@ static void ftrace_profile_disable_event(struct ftrace_event_call *event)
 	}
 }
 
-void ftrace_profile_disable(int event_id)
+void perf_trace_disable(int event_id)
 {
 	struct ftrace_event_call *event;
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
 		if (event->id == event_id) {
-			ftrace_profile_disable_event(event);
+			perf_trace_event_disable(event);
 			module_put(event->mod);
 			break;
 		}
@@ -119,8 +123,8 @@ void ftrace_profile_disable(int event_id)
 	mutex_unlock(&event_mutex);
 }
 
-__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
-					int *rctxp, unsigned long *irq_flags)
+__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
+				       int *rctxp, unsigned long *irq_flags)
 {
 	struct trace_entry *entry;
 	char *trace_buf, *raw_data;
@@ -161,4 +165,4 @@ err_recursion:
 	local_irq_restore(*irq_flags);
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);
+EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
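The renamed perf_trace_buf_prepare()/perf_trace_buf_submit() pair follows a reserve-fill-commit discipline: reserve space, fill the record in place, then commit it. A hedged plain-C model of that pattern (hypothetical buffer and names, not the kernel API):

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 256

static char trace_buf[BUF_SIZE];

/* Reserve space in a shared buffer; returns NULL when the record
 * does not fit (mirrors perf_trace_buf_prepare() failing). */
static void *buf_prepare(size_t size, size_t *off)
{
	if (*off + size > BUF_SIZE)
		return NULL;
	return &trace_buf[*off];
}

/* Commit the filled record (mirrors perf_trace_buf_submit()). */
static void buf_submit(size_t size, size_t *off)
{
	*off += size;
	printf("committed %zu bytes, buffer now at %zu\n", size, *off);
}

int main(void)
{
	size_t off = 0;
	const char payload[] = "syscall_enter: nr=1";

	char *rec = buf_prepare(sizeof(payload), &off);
	if (!rec)
		return 1;		/* buffer full: drop the event */
	memcpy(rec, payload, sizeof(payload));
	buf_submit(sizeof(payload), &off);
	return 0;
}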
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -938,7 +938,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 	trace_create_file("enable", 0644, call->dir, call,
 			  enable);
 
-	if (call->id && call->profile_enable)
+	if (call->id && call->perf_event_enable)
 		trace_create_file("id", 0444, call->dir, call,
 				  id);
 
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1214,7 +1214,7 @@ static int set_print_fmt(struct trace_probe *tp)
 #ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static __kprobes void kprobe_profile_func(struct kprobe *kp,
+static __kprobes void kprobe_perf_func(struct kprobe *kp,
 					 struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
@@ -1227,11 +1227,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp,
 	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
 		     "profile buffer not large enough"))
 		return;
 
-	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+	entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
 	if (!entry)
 		return;
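The size computation in this hunk rounds the record plus its u32 size header up to a u64 boundary, then subtracts the header back out, so header plus payload always end 8-byte aligned. A worked check in C, with ALIGN defined as in the kernel and a hypothetical 13-byte record:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Kernel-style ALIGN for power-of-two boundaries. */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint32_t raw_size = 13;		/* hypothetical record size */

	/* 13 + 4 = 17, rounded up to 24, minus 4 -> 20: a 4-byte size
	 * header plus a 20-byte slot ends on a u64 boundary. */
	uint32_t size = ALIGN(raw_size + sizeof(uint32_t), sizeof(uint64_t))
			- sizeof(uint32_t);

	assert(size == 20);
	assert((size + sizeof(uint32_t)) % sizeof(uint64_t) == 0);
	printf("padded record size = %u\n", size);
	return 0;
}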
@@ -1240,11 +1240,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp,
 	for (i = 0; i < tp->nr_args; i++)
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 
-	ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags);
+	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
 }
 
 /* Kretprobe profile handler */
-static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
+static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
 					    struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
@@ -1257,11 +1257,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
 	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
 		     "profile buffer not large enough"))
 		return;
 
-	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+	entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
 	if (!entry)
 		return;
@@ -1271,10 +1271,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
 	for (i = 0; i < tp->nr_args; i++)
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 
-	ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags);
+	perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1,
+			      irq_flags, regs);
 }
 
-static int probe_profile_enable(struct ftrace_event_call *call)
+static int probe_perf_enable(struct ftrace_event_call *call)
 {
 	struct trace_probe *tp = (struct trace_probe *)call->data;
 
@@ -1286,7 +1287,7 @@ static int probe_profile_enable(struct ftrace_event_call *call)
 	return enable_kprobe(&tp->rp.kp);
 }
 
-static void probe_profile_disable(struct ftrace_event_call *call)
+static void probe_perf_disable(struct ftrace_event_call *call)
 {
 	struct trace_probe *tp = (struct trace_probe *)call->data;
 
@@ -1311,7 +1312,7 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 		kprobe_trace_func(kp, regs);
 #ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
-		kprobe_profile_func(kp, regs);
+		kprobe_perf_func(kp, regs);
 #endif
 	return 0;	/* We don't tweek kernel, so just return 0 */
 }
@@ -1325,7 +1326,7 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
 		kretprobe_trace_func(ri, regs);
 #ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
-		kretprobe_profile_func(ri, regs);
+		kretprobe_perf_func(ri, regs);
 #endif
 	return 0;	/* We don't tweek kernel, so just return 0 */
 }
@@ -1358,8 +1359,8 @@ static int register_probe_event(struct trace_probe *tp)
 	call->unregfunc = probe_event_disable;
 
 #ifdef CONFIG_PERF_EVENTS
-	call->profile_enable = probe_profile_enable;
-	call->profile_disable = probe_profile_disable;
+	call->perf_event_enable = probe_perf_enable;
+	call->perf_event_disable = probe_perf_disable;
 #endif
 	call->data = tp;
 	ret = trace_add_event_call(call);
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -428,12 +428,12 @@ core_initcall(init_ftrace_syscalls);
 
 #ifdef CONFIG_PERF_EVENTS
 
-static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
-static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
-static int sys_prof_refcount_enter;
-static int sys_prof_refcount_exit;
+static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
+static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
+static int sys_perf_refcount_enter;
+static int sys_perf_refcount_exit;
 
-static void prof_syscall_enter(struct pt_regs *regs, long id)
+static void perf_syscall_enter(struct pt_regs *regs, long id)
 {
 	struct syscall_metadata *sys_data;
 	struct syscall_trace_enter *rec;
@@ -443,7 +443,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	int size;
 
 	syscall_nr = syscall_get_nr(current, regs);
-	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
+	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
 		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -455,11 +455,11 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	size = ALIGN(size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
 
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
-		      "profile buffer not large enough"))
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+		      "perf buffer not large enough"))
 		return;
 
-	rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size,
+	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
 				sys_data->enter_event->id, &rctx, &flags);
 	if (!rec)
 		return;
@@ -467,10 +467,10 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	rec->nr = syscall_nr;
 	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
 			       (unsigned long *)&rec->args);
-	ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags);
+	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
 }
 
-int prof_sysenter_enable(struct ftrace_event_call *call)
+int perf_sysenter_enable(struct ftrace_event_call *call)
 {
 	int ret = 0;
 	int num;
@@ -478,34 +478,34 @@ int prof_sysenter_enable(struct ftrace_event_call *call)
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	if (!sys_prof_refcount_enter)
-		ret = register_trace_sys_enter(prof_syscall_enter);
+	if (!sys_perf_refcount_enter)
+		ret = register_trace_sys_enter(perf_syscall_enter);
 	if (ret) {
 		pr_info("event trace: Could not activate"
 				"syscall entry trace point");
 	} else {
-		set_bit(num, enabled_prof_enter_syscalls);
-		sys_prof_refcount_enter++;
+		set_bit(num, enabled_perf_enter_syscalls);
+		sys_perf_refcount_enter++;
 	}
 	mutex_unlock(&syscall_trace_lock);
 	return ret;
 }
 
-void prof_sysenter_disable(struct ftrace_event_call *call)
+void perf_sysenter_disable(struct ftrace_event_call *call)
 {
 	int num;
 
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	sys_prof_refcount_enter--;
-	clear_bit(num, enabled_prof_enter_syscalls);
-	if (!sys_prof_refcount_enter)
-		unregister_trace_sys_enter(prof_syscall_enter);
+	sys_perf_refcount_enter--;
+	clear_bit(num, enabled_perf_enter_syscalls);
+	if (!sys_perf_refcount_enter)
+		unregister_trace_sys_enter(perf_syscall_enter);
 	mutex_unlock(&syscall_trace_lock);
 }
 
-static void prof_syscall_exit(struct pt_regs *regs, long ret)
+static void perf_syscall_exit(struct pt_regs *regs, long ret)
 {
 	struct syscall_metadata *sys_data;
 	struct syscall_trace_exit *rec;
@@ -515,7 +515,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	int size;
 
 	syscall_nr = syscall_get_nr(current, regs);
-	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
+	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
 		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -530,11 +530,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	 * Impossible, but be paranoid with the future
 	 * How to put this check outside runtime?
 	 */
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
-		"exit event has grown above profile buffer size"))
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+		"exit event has grown above perf buffer size"))
 		return;
 
-	rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size,
+	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
 				sys_data->exit_event->id, &rctx, &flags);
 	if (!rec)
 		return;
@@ -542,10 +542,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	rec->nr = syscall_nr;
 	rec->ret = syscall_get_return_value(current, regs);
 
-	ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags);
+	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
 }
 
-int prof_sysexit_enable(struct ftrace_event_call *call)
+int perf_sysexit_enable(struct ftrace_event_call *call)
 {
 	int ret = 0;
 	int num;
@@ -553,30 +553,30 @@ int prof_sysexit_enable(struct ftrace_event_call *call)
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	if (!sys_prof_refcount_exit)
-		ret = register_trace_sys_exit(prof_syscall_exit);
+	if (!sys_perf_refcount_exit)
+		ret = register_trace_sys_exit(perf_syscall_exit);
 	if (ret) {
 		pr_info("event trace: Could not activate"
 				"syscall exit trace point");
 	} else {
-		set_bit(num, enabled_prof_exit_syscalls);
-		sys_prof_refcount_exit++;
+		set_bit(num, enabled_perf_exit_syscalls);
+		sys_perf_refcount_exit++;
 	}
 	mutex_unlock(&syscall_trace_lock);
 	return ret;
 }
 
-void prof_sysexit_disable(struct ftrace_event_call *call)
+void perf_sysexit_disable(struct ftrace_event_call *call)
 {
 	int num;
 
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	sys_prof_refcount_exit--;
-	clear_bit(num, enabled_prof_exit_syscalls);
-	if (!sys_prof_refcount_exit)
-		unregister_trace_sys_exit(prof_syscall_exit);
+	sys_perf_refcount_exit--;
+	clear_bit(num, enabled_perf_exit_syscalls);
+	if (!sys_perf_refcount_exit)
+		unregister_trace_sys_exit(perf_syscall_exit);
 	mutex_unlock(&syscall_trace_lock);
 }
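The sysenter/sysexit enable paths above share one idiom: a global refcount registers the single tracepoint callback on the first user and unregisters it on the last, while a per-syscall bitmap decides which events actually fire. A hedged single-threaded model (hypothetical names, locking omitted):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_SYSCALLS 64

static bool enabled[NR_SYSCALLS];	/* models the per-syscall bitmap */
static int refcount;			/* models sys_perf_refcount_enter */
static bool hook_registered;		/* models register_trace_sys_enter() */

static void enable_syscall_event(int nr)
{
	if (!refcount)			/* first user: install the hook */
		hook_registered = true;
	enabled[nr] = true;
	refcount++;
}

static void disable_syscall_event(int nr)
{
	refcount--;
	enabled[nr] = false;
	if (!refcount)			/* last user: remove the hook */
		hook_registered = false;
}

int main(void)
{
	enable_syscall_event(1);	/* hook installed */
	enable_syscall_event(2);	/* hook reused */
	disable_syscall_event(1);
	assert(hook_registered);	/* syscall 2 still needs it */
	disable_syscall_event(2);
	assert(!hook_registered);	/* no users left */
	return 0;
}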