tracing/kprobes: Turn trace_probe->files into list_head
I think that "ftrace_event_file *trace_probe[]" complicates the code for no
reason; turn it into a list_head to simplify the code. enable_trace_probe()
no longer needs synchronize_sched().

This needs the extra sizeof(list_head) memory for every attached
ftrace_event_file, hopefully not a problem in this case.

Link: http://lkml.kernel.org/r/20130620173814.GA13165@redhat.com

Acked-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
commit b04d52e368
parent 3baa5e4cf2
committed by Steven Rostedt
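For orientation, here is a minimal standalone sketch of the list_head + RCU
pattern this patch switches to: entries are published with list_add_tail_rcu(),
walked locklessly with list_for_each_entry_rcu() from the preempt-disabled
kprobe handlers, and removed with list_del_rcu() followed by
synchronize_sched() before the kfree(). The struct mirrors the patch; the
helper names (link_file, unlink_file, for_each_file) and the bare list_head
parameter are hypothetical simplifications, not the real trace_kprobe code.

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct ftrace_event_file;		/* opaque here; real type lives in ftrace */

struct event_file_link {
	struct ftrace_event_file	*file;
	struct list_head		list;
};

/* Writer side: add one file; serialized by event_mutex in the real code. */
static int link_file(struct list_head *files, struct ftrace_event_file *file)
{
	struct event_file_link *link;

	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	link->file = file;
	/* Publishes the entry; lockless readers may see it right away. */
	list_add_tail_rcu(&link->list, files);
	return 0;
}

/* Writer side: remove one file and free it once readers are done. */
static int unlink_file(struct list_head *files, struct ftrace_event_file *file)
{
	struct event_file_link *link;

	list_for_each_entry(link, files, list) {
		if (link->file == file) {
			list_del_rcu(&link->list);
			/*
			 * Readers run with preemption disabled, so this
			 * waits for all of them before the entry is freed.
			 */
			synchronize_sched();
			kfree(link);
			return 0;
		}
	}
	return -ENOENT;
}

/* Reader side: called from a preempt-disabled kprobe handler. */
static void for_each_file(struct list_head *files,
			  void (*fn)(struct ftrace_event_file *file))
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, files, list)
		fn(link->file);
}

This is also why enable_trace_probe() no longer needs synchronize_sched():
adding an entry never invalidates what concurrent readers are traversing;
only removal does.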
@@ -35,12 +35,17 @@ struct trace_probe {
 	const char		*symbol;	/* symbol name */
 	struct ftrace_event_class	class;
 	struct ftrace_event_call	call;
-	struct ftrace_event_file * __rcu *files;
+	struct list_head	files;
 	ssize_t			size;		/* trace entry size */
 	unsigned int		nr_args;
 	struct probe_arg	args[];
 };
 
+struct event_file_link {
+	struct ftrace_event_file	*file;
+	struct list_head		list;
+};
+
 #define SIZEOF_TRACE_PROBE(n)			\
 	(offsetof(struct trace_probe, args) +	\
 	(sizeof(struct probe_arg) * (n)))
@@ -150,6 +155,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
 		goto error;
 
 	INIT_LIST_HEAD(&tp->list);
+	INIT_LIST_HEAD(&tp->files);
 	return tp;
 error:
 	kfree(tp->call.name);
@@ -183,22 +189,6 @@ static struct trace_probe *find_trace_probe(const char *event,
 	return NULL;
 }
 
-/*
- * This and enable_trace_probe/disable_trace_probe rely on event_mutex
- * held by the caller, __ftrace_set_clr_event().
- */
-static int trace_probe_nr_files(struct trace_probe *tp)
-{
-	struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
-	int ret = 0;
-
-	if (file)
-		while (*(file++))
-			ret++;
-
-	return ret;
-}
-
 /*
  * Enable trace_probe
  * if the file is NULL, enable "perf" handler, or enable "trace" handler.
@@ -209,29 +199,18 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 	int ret = 0;
 
 	if (file) {
-		struct ftrace_event_file **new, **old;
-		int n = trace_probe_nr_files(tp);
-
-		old = rcu_dereference_raw(tp->files);
-		/* 1 is for new one and 1 is for stopper */
-		new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
-			      GFP_KERNEL);
-		if (!new) {
+		struct event_file_link *link;
+
+		link = kmalloc(sizeof(*link), GFP_KERNEL);
+		if (!link) {
 			ret = -ENOMEM;
 			goto out;
 		}
-		memcpy(new, old, n * sizeof(struct ftrace_event_file *));
-		new[n] = file;
-		/* The last one keeps a NULL */
 
-		rcu_assign_pointer(tp->files, new);
-		tp->flags |= TP_FLAG_TRACE;
+		link->file = file;
+		list_add_tail_rcu(&link->list, &tp->files);
 
-		if (old) {
-			/* Make sure the probe is done with old files */
-			synchronize_sched();
-			kfree(old);
-		}
+		tp->flags |= TP_FLAG_TRACE;
 	} else
 		tp->flags |= TP_FLAG_PROFILE;
 
@@ -245,24 +224,16 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 	return ret;
 }
 
-static int
-trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
+static struct event_file_link *
+find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
 {
-	struct ftrace_event_file **files;
-	int i;
+	struct event_file_link *link;
 
-	/*
-	 * Since all tp->files updater is protected by probe_enable_lock,
-	 * we don't need to lock an rcu_read_lock.
-	 */
-	files = rcu_dereference_raw(tp->files);
-	if (files) {
-		for (i = 0; files[i]; i++)
-			if (files[i] == file)
-				return i;
-	}
+	list_for_each_entry(link, &tp->files, list)
+		if (link->file == file)
+			return link;
 
-	return -1;
+	return NULL;
 }
 
 /*
@@ -275,38 +246,23 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 	int ret = 0;
 
 	if (file) {
-		struct ftrace_event_file **new, **old;
-		int n = trace_probe_nr_files(tp);
-		int i, j;
+		struct event_file_link *link;
 
-		old = rcu_dereference_raw(tp->files);
-		if (n == 0 || trace_probe_file_index(tp, file) < 0) {
+		link = find_event_file_link(tp, file);
+		if (!link) {
 			ret = -EINVAL;
 			goto out;
 		}
 
-		if (n == 1) {	/* Remove the last file */
-			tp->flags &= ~TP_FLAG_TRACE;
-			new = NULL;
-		} else {
-			new = kzalloc(n * sizeof(struct ftrace_event_file *),
-				      GFP_KERNEL);
-			if (!new) {
-				ret = -ENOMEM;
-				goto out;
-			}
-
-			/* This copy & check loop copies the NULL stopper too */
-			for (i = 0, j = 0; j < n && i < n + 1; i++)
-				if (old[i] != file)
-					new[j++] = old[i];
-		}
-
-		rcu_assign_pointer(tp->files, new);
-
-		/* Make sure the probe is done with old files */
+		list_del_rcu(&link->list);
+		/* synchronize with kprobe_trace_func/kretprobe_trace_func */
 		synchronize_sched();
-		kfree(old);
+		kfree(link);
+
+		if (!list_empty(&tp->files))
+			goto out;
+
+		tp->flags &= ~TP_FLAG_TRACE;
 	} else
 		tp->flags &= ~TP_FLAG_PROFILE;
 
@@ -871,20 +827,10 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
 static __kprobes void
 kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
 {
-	/*
-	 * Note: preempt is already disabled around the kprobe handler.
-	 * However, we still need an smp_read_barrier_depends() corresponding
-	 * to smp_wmb() in rcu_assign_pointer() to access the pointer.
-	 */
-	struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
-
-	if (unlikely(!file))
-		return;
+	struct event_file_link *link;
 
-	while (*file) {
-		__kprobe_trace_func(tp, regs, *file);
-		file++;
-	}
+	list_for_each_entry_rcu(link, &tp->files, list)
+		__kprobe_trace_func(tp, regs, link->file);
 }
 
 /* Kretprobe handler */
@@ -931,20 +877,10 @@ static __kprobes void
 kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
 		     struct pt_regs *regs)
 {
-	/*
-	 * Note: preempt is already disabled around the kprobe handler.
-	 * However, we still need an smp_read_barrier_depends() corresponding
-	 * to smp_wmb() in rcu_assign_pointer() to access the pointer.
-	 */
-	struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
-
-	if (unlikely(!file))
-		return;
+	struct event_file_link *link;
 
-	while (*file) {
-		__kretprobe_trace_func(tp, ri, regs, *file);
-		file++;
-	}
+	list_for_each_entry_rcu(link, &tp->files, list)
+		__kretprobe_trace_func(tp, ri, regs, link->file);
 }
 
 /* Event entry printers */