sh: Function graph tracer support
Add both dynamic and static function graph tracer support for sh.

Signed-off-by: Matt Fleming <matt@console-pimps.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
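For orientation before the diff: at function entry the compiler-inserted mcount hook (the graph-caller stub on sh) hands prepare_ftrace_return() the address of the slot holding the traced function's return address, together with the traced function's own address. A rough C-level sketch of that call-side handoff follows; the stub name is made up, since the real stub is SH assembly and is not part of this excerpt.

/*
 * Illustrative sketch only; the real graph-caller stub is SH assembly
 * and "example_graph_entry_stub" is a hypothetical name.
 */
static void example_graph_entry_stub(unsigned long *ra_slot,
				     unsigned long traced_func)
{
	/*
	 * prepare_ftrace_return() (added by this patch) saves the real
	 * return address on current's ret_stack and may rewrite
	 * *ra_slot to point at return_to_handler, so the function's
	 * exit can be traced as well.
	 */
	prepare_ftrace_return(ra_slot, traced_func);
}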
@@ -16,11 +16,13 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <asm/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/unistd.h>
#include <trace/syscall.h>

#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];

static unsigned char ftrace_nop[4];
@@ -133,6 +135,126 @@ int __init ftrace_dyn_arch_init(void *data)

	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static int ftrace_mod(unsigned long ip, unsigned long old_addr,
		      unsigned long new_addr)
{
	unsigned char code[MCOUNT_INSN_SIZE];

	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (old_addr != __raw_readl((unsigned long *)code))
		return -EINVAL;

	__raw_writel(new_addr, ip);
	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip, old_addr, new_addr;

	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
	old_addr = (unsigned long)(&skip_trace);
	new_addr = (unsigned long)(&ftrace_graph_caller);

	return ftrace_mod(ip, old_addr, new_addr);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip, old_addr, new_addr;

	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
	old_addr = (unsigned long)(&ftrace_graph_caller);
	new_addr = (unsigned long)(&skip_trace);

	return ftrace_mod(ip, old_addr, new_addr);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
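To make the dynamic case concrete: ftrace_graph_call is a patchable site in the arch's mcount/ftrace entry code (declared extern above), and the 32-bit literal at GRAPH_INSN_OFFSET past it selects where that site branches. The snippet below is only an illustrative sketch of how the two entry points above would be used by code that turns graph tracing on and off; it is not part of the patch, and the toggle function name is made up (in the real kernel the generic ftrace core decides when to flip the call site).

/*
 * Illustrative sketch only: shows the intended use of the two arch
 * hooks added above. "example_set_graph_tracing" is hypothetical.
 */
static int example_set_graph_tracing(bool enable)
{
	/*
	 * Both hooks patch the literal at ftrace_graph_call +
	 * GRAPH_INSN_OFFSET so the stub branches either to
	 * ftrace_graph_caller (tracing on) or skip_trace (tracing off),
	 * and return -EINVAL if the site does not currently hold the
	 * value they expect to replace.
	 */
	if (enable)
		return ftrace_enable_ftrace_graph_caller();

	return ftrace_disable_ftrace_graph_caller();
}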

/*
 * Hook the return address and push it in the stack of return addrs
 * in the current thread info.
 *
 * This is the main routine for the function graph tracer. The function
 * graph tracer essentially works like this:
 *
 * parent is the stack address containing self_addr's return address.
 * We pull the real return address out of parent and store it in
 * current's ret_stack. Then, we replace the return address on the stack
 * with the address of return_to_handler. self_addr is the function that
 * called mcount.
 *
 * When self_addr returns, it will jump to return_to_handler which calls
 * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
 * return address off of current's ret_stack and jump to it.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted, err;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against fault, even if it shouldn't
	 * happen. This tool is too much intrusive to
	 * ignore such a protection.
	 */
	__asm__ __volatile__(
		"1:					\n\t"
		"mov.l	@%2, %0				\n\t"
		"2:					\n\t"
		"mov.l	%3, @%2				\n\t"
		"mov	#0, %1				\n\t"
		"3:					\n\t"
		".section .fixup, \"ax\"		\n\t"
		"4:					\n\t"
		"mov.l	5f, %0				\n\t"
		"jmp	@%0				\n\t"
		" mov	#1, %1				\n\t"
		".balign 4				\n\t"
		"5:	.long 3b			\n\t"
		".previous				\n\t"
		".section __ex_table,\"a\"		\n\t"
		".long 1b, 4b				\n\t"
		".long 2b, 4b				\n\t"
		".previous				\n\t"
		: "=&r" (old), "=r" (faulted)
		: "r" (parent), "r" (return_hooker)
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
	if (err == -EBUSY) {
		__raw_writel(old, parent);
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		__raw_writel(old, parent);
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
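The exit half of the round trip described in the comment above is not in this hunk; on sh it is the return_to_handler assembly trampoline. Purely as orientation, a C-level sketch of what that trampoline does is given below. The exact signature of ftrace_return_to_handler() has varied between kernel versions (assumed here to take no arguments), so treat this as an assumption-laden illustration rather than the patch's code.

/*
 * Illustrative only: the real return_to_handler is an assembly
 * trampoline and jumps to the recovered address rather than
 * returning it.
 */
static unsigned long example_return_to_handler(void)
{
	unsigned long real_ret;

	/*
	 * Pop the return address saved by prepare_ftrace_return() (via
	 * ftrace_push_return_trace()) off current's ret_stack; the
	 * generic tracer records the function exit at the same time.
	 */
	real_ret = ftrace_return_to_handler();

	/* The assembly trampoline branches back to real_ret here. */
	return real_ret;
}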

#ifdef CONFIG_FTRACE_SYSCALLS