Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core
Pull tracing updates from Steve Rostedt.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Documentation/trace/ftrace.txt:

@@ -1842,6 +1842,89 @@ an error.
 # cat buffer_size_kb
 85
 
+
+Snapshot
+--------
+CONFIG_TRACER_SNAPSHOT makes a generic snapshot feature
+available to all non-latency tracers. (Latency tracers which
+record max latency, such as "irqsoff" or "wakeup", can't use
+this feature, since those are already using the snapshot
+mechanism internally.)
+
+Snapshot preserves a current trace buffer at a particular point
+in time without stopping tracing. Ftrace swaps the current
+buffer with a spare buffer, and tracing continues in the new
+current (=previous spare) buffer.
+
+The following debugfs files in "tracing" are related to this
+feature:
+
+  snapshot:
+
+	This is used to take a snapshot and to read the output
+	of the snapshot. Echo 1 into this file to allocate a
+	spare buffer and to take a snapshot (swap), then read
+	the snapshot from this file in the same format as
+	"trace" (described above in the section "The File
+	System"). Reading the snapshot and tracing can run in
+	parallel. Once the spare buffer is allocated, echoing
+	0 frees it, and echoing any other (positive) value
+	clears the snapshot contents.
+	More details are shown in the table below.
+
+	status\input  |     0      |     1      |    else    |
+	--------------+------------+------------+------------+
+	not allocated |(do nothing)| alloc+swap |   EINVAL   |
+	--------------+------------+------------+------------+
+	allocated     |    free    |    swap    |   clear    |
+	--------------+------------+------------+------------+
+
+Here is an example of using the snapshot feature.
+
+	# echo 1 > events/sched/enable
+	# echo 1 > snapshot
+	# cat snapshot
+# tracer: nop
+#
+# entries-in-buffer/entries-written: 71/71   #P:8
+#
+#                              _-----=> irqs-off
+#                             / _----=> need-resched
+#                            | / _---=> hardirq/softirq
+#                            || / _--=> preempt-depth
+#                            ||| /     delay
+#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION
+#              | |       |   ||||       |         |
+          <idle>-0     [005] d...  2440.603828: sched_switch: prev_comm=swapper/5 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2242 next_prio=120
+           sleep-2242  [005] d...  2440.603846: sched_switch: prev_comm=snapshot-test-2 prev_pid=2242 prev_prio=120 prev_state=R ==> next_comm=kworker/5:1 next_pid=60 next_prio=120
+[...]
+          <idle>-0     [002] d...  2440.707230: sched_switch: prev_comm=swapper/2 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2229 next_prio=120
+
+	# cat trace
+# tracer: nop
+#
+# entries-in-buffer/entries-written: 77/77   #P:8
+#
+#                              _-----=> irqs-off
+#                             / _----=> need-resched
+#                            | / _---=> hardirq/softirq
+#                            || / _--=> preempt-depth
+#                            ||| /     delay
+#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION
+#              | |       |   ||||       |         |
+          <idle>-0     [007] d...  2440.707395: sched_switch: prev_comm=swapper/7 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2243 next_prio=120
+ snapshot-test-2-2229  [002] d...  2440.707438: sched_switch: prev_comm=snapshot-test-2 prev_pid=2229 prev_prio=120 prev_state=S ==> next_comm=swapper/2 next_pid=0 next_prio=120
+[...]
+
+
+If you try to use this snapshot feature when the current tracer
+is one of the latency tracers, you will get the following
+results.
+
+	# echo wakeup > current_tracer
+	# echo 1 > snapshot
+bash: echo: write error: Device or resource busy
+	# cat snapshot
+cat: snapshot: Device or resource busy
+
 -----------
 
 More details can be found in the source code, in the
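
For readers who want the table above as a runnable walk-through, here is a
minimal userspace sketch. It is illustrative only (not part of this commit),
and assumes debugfs is mounted at /sys/kernel/debug with a non-latency tracer
active; run it as root.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        ssize_t n;
        int fd = open("/sys/kernel/debug/tracing/snapshot", O_RDWR);

        if (fd < 0) {
                perror("open snapshot");
                return 1;
        }
        /* "1": allocate the spare buffer if needed, then swap (take a snapshot) */
        if (write(fd, "1", 1) < 0)
                perror("write 1");      /* EBUSY under a latency tracer */

        /* read the snapshot back, in the same format as the "trace" file */
        lseek(fd, 0, SEEK_SET);
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, n, stdout);

        /* "2" (any other positive value): clear the snapshot contents */
        if (write(fd, "2", 1) < 0)
                perror("write 2");
        /* "0": free the spare buffer again */
        if (write(fd, "0", 1) < 0)
                perror("write 0");
        close(fd);
        return 0;
}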
include/linux/ftrace_event.h:

@@ -83,6 +83,9 @@ struct trace_iterator {
         long                    idx;
 
         cpumask_var_t           started;
+
+        /* it's true when current open file is snapshot */
+        bool                    snapshot;
 };
 
 enum trace_iter_flags {
include/linux/ring_buffer.h:

@@ -167,6 +167,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu);
 
 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
kernel/trace/Kconfig:

@@ -253,6 +253,16 @@ config FTRACE_SYSCALLS
         help
           Basic tracer to catch the syscall entry and exit events.
 
+config TRACER_SNAPSHOT
+        bool "Create a snapshot trace buffer"
+        select TRACER_MAX_TRACE
+        help
+          Allow tracing users to take a snapshot of the current buffer using
+          the ftrace interface, e.g.:
+
+              echo 1 > /sys/kernel/debug/tracing/snapshot
+              cat snapshot
+
 config TRACE_BRANCH_PROFILING
         bool
         select GENERIC_TRACER
kernel/trace/ring_buffer.c:

@@ -3102,6 +3102,24 @@ ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
 
+/**
+ * ring_buffer_read_events_cpu - get the number of events successfully read
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to get the number of events read
+ */
+unsigned long
+ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
+{
+        struct ring_buffer_per_cpu *cpu_buffer;
+
+        if (!cpumask_test_cpu(cpu, buffer->cpumask))
+                return 0;
+
+        cpu_buffer = buffer->buffers[cpu];
+        return cpu_buffer->read;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
+
 /**
  * ring_buffer_entries - get the number of entries in a buffer
  * @buffer: The ring buffer
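
The new accessor completes a family of per-CPU counters. A kernel-style
sketch of how they might be combined for a debug printout; the helper name
report_cpu_buffer_stats is hypothetical, not from this commit:

#include <linux/ring_buffer.h>
#include <linux/printk.h>

/* hypothetical helper: dump the per-CPU counters this API family exposes */
static void report_cpu_buffer_stats(struct ring_buffer *buffer, int cpu)
{
        unsigned long entries = ring_buffer_entries_cpu(buffer, cpu);
        unsigned long overrun = ring_buffer_overrun_cpu(buffer, cpu);
        unsigned long read = ring_buffer_read_events_cpu(buffer, cpu);

        /* events still queued, events lost to overwrite, events consumed */
        pr_info("cpu%d: entries=%lu overrun=%lu read=%lu\n",
                cpu, entries, overrun, read);
}

The tracing_stats_read() hunk further down wires the same counter into the
per_cpu/cpu*/stats output.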
kernel/trace/trace.c:

@@ -249,7 +249,7 @@ static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
 static struct tracer            *trace_types __read_mostly;
 
 /* current_trace points to the tracer that is currently active */
-static struct tracer            *current_trace __read_mostly;
+static struct tracer            *current_trace __read_mostly = &nop_trace;
 
 /*
  * trace_types_lock is used to protect the trace_types list.
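
This initializer is the basis for a cleanup that runs through the rest of
the series: current_trace can no longer be NULL, so later hunks drop their
"if (current_trace)" guards. A standalone sketch of the sentinel pattern
(the demo_ names are made up for illustration):

#include <stdio.h>

struct demo_tracer { const char *name; };

static struct demo_tracer demo_nop = { "nop" };
static struct demo_tracer *demo_current = &demo_nop;    /* never NULL */

int main(void)
{
        /* safe to dereference unconditionally, just as
         * tracing_set_trace_read() below now does with current_trace->name */
        printf("current tracer: %s\n", demo_current->name);
        return 0;
}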
@@ -710,12 +710,11 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
         WARN_ON_ONCE(!irqs_disabled());
 
-        /* If we disabled the tracer, stop now */
-        if (current_trace == &nop_trace)
+        if (!current_trace->allocated_snapshot) {
+                /* Only the nop tracer should hit this when disabling */
+                WARN_ON_ONCE(current_trace != &nop_trace);
                 return;
-
-        if (WARN_ON_ONCE(!current_trace->use_max_tr))
-                return;
+        }
 
         arch_spin_lock(&ftrace_max_lock);
 
@@ -743,10 +742,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
                 return;
 
         WARN_ON_ONCE(!irqs_disabled());
-        if (!current_trace->use_max_tr) {
-                WARN_ON_ONCE(1);
+        if (WARN_ON_ONCE(!current_trace->allocated_snapshot))
                 return;
-        }
 
         arch_spin_lock(&ftrace_max_lock);
 
@@ -866,10 +863,13 @@ int register_tracer(struct tracer *type)
 
         current_trace = type;
 
-        /* If we expanded the buffers, make sure the max is expanded too */
-        if (ring_buffer_expanded && type->use_max_tr)
-                ring_buffer_resize(max_tr.buffer, trace_buf_size,
-                                        RING_BUFFER_ALL_CPUS);
+        if (type->use_max_tr) {
+                /* If we expanded the buffers, make sure the max is expanded too */
+                if (ring_buffer_expanded)
+                        ring_buffer_resize(max_tr.buffer, trace_buf_size,
+                                           RING_BUFFER_ALL_CPUS);
+                type->allocated_snapshot = true;
+        }
 
         /* the test is responsible for initializing and enabling */
         pr_info("Testing tracer %s: ", type->name);
@@ -885,10 +885,14 @@ int register_tracer(struct tracer *type)
                 /* Only reset on passing, to avoid touching corrupted buffers */
                 tracing_reset_online_cpus(tr);
 
-                /* Shrink the max buffer again */
-                if (ring_buffer_expanded && type->use_max_tr)
-                        ring_buffer_resize(max_tr.buffer, 1,
-                                                RING_BUFFER_ALL_CPUS);
+                if (type->use_max_tr) {
+                        type->allocated_snapshot = false;
+
+                        /* Shrink the max buffer again */
+                        if (ring_buffer_expanded)
+                                ring_buffer_resize(max_tr.buffer, 1,
+                                                   RING_BUFFER_ALL_CPUS);
+                }
 
                 printk(KERN_CONT "PASSED\n");
         }
@@ -1344,7 +1348,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
          */
         preempt_disable_notrace();
 
-        use_stack = ++__get_cpu_var(ftrace_stack_reserve);
+        use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
         /*
          * We don't need any atomic variables, just a barrier.
          * If an interrupt comes in, we don't care, because it would
@@ -1398,7 +1402,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
  out:
         /* Again, don't let gcc optimize things here */
         barrier();
-        __get_cpu_var(ftrace_stack_reserve)--;
+        __this_cpu_dec(ftrace_stack_reserve);
         preempt_enable_notrace();
 
 }
@@ -1948,21 +1952,27 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 static void *s_start(struct seq_file *m, loff_t *pos)
 {
         struct trace_iterator *iter = m->private;
-        static struct tracer *old_tracer;
         int cpu_file = iter->cpu_file;
         void *p = NULL;
         loff_t l = 0;
         int cpu;
 
-        /* copy the tracer to avoid using a global lock all around */
+        /*
+         * copy the tracer to avoid using a global lock all around.
+         * iter->trace is a copy of current_trace, the pointer to the
+         * name may be used instead of a strcmp(), as iter->trace->name
+         * will point to the same string as current_trace->name.
+         */
         mutex_lock(&trace_types_lock);
-        if (unlikely(old_tracer != current_trace && current_trace)) {
-                old_tracer = current_trace;
+        if (unlikely(current_trace && iter->trace->name != current_trace->name))
                 *iter->trace = *current_trace;
-        }
         mutex_unlock(&trace_types_lock);
 
-        atomic_inc(&trace_record_cmdline_disabled);
+        if (iter->snapshot && iter->trace->use_max_tr)
+                return ERR_PTR(-EBUSY);
+
+        if (!iter->snapshot)
+                atomic_inc(&trace_record_cmdline_disabled);
 
         if (*pos != iter->pos) {
                 iter->ent = NULL;
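
The rewritten comment justifies comparing name pointers instead of calling
strcmp(): "*iter->trace = *current_trace" copies the name pointer itself, so
the two pointers only diverge once a different tracer is installed. A
self-contained sketch of that aliasing (the demo_ names are hypothetical):

#include <stdio.h>

struct demo_tracer { const char *name; };

static struct demo_tracer demo_nop = { "nop" };
static struct demo_tracer demo_wakeup = { "wakeup" };

int main(void)
{
        struct demo_tracer *current_trace = &demo_nop;
        struct demo_tracer copy = *current_trace;  /* like *iter->trace = *current_trace */

        /* same tracer: the copied name pointer aliases the same string */
        printf("stale? %d\n", copy.name != current_trace->name);  /* 0 */

        current_trace = &demo_wakeup;  /* tracer switched */
        printf("stale? %d\n", copy.name != current_trace->name);  /* 1 */
        return 0;
}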
@@ -2001,7 +2011,11 @@ static void s_stop(struct seq_file *m, void *p)
 {
         struct trace_iterator *iter = m->private;
 
-        atomic_dec(&trace_record_cmdline_disabled);
+        if (iter->snapshot && iter->trace->use_max_tr)
+                return;
+
+        if (!iter->snapshot)
+                atomic_dec(&trace_record_cmdline_disabled);
         trace_access_unlock(iter->cpu_file);
         trace_event_read_unlock();
 }
@@ -2086,8 +2100,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
         unsigned long total;
         const char *name = "preemption";
 
-        if (type)
-                name = type->name;
+        name = type->name;
 
         get_total_entries(tr, &total, &entries);
 
@@ -2436,7 +2449,7 @@ static const struct seq_operations tracer_seq_ops = {
 };
 
 static struct trace_iterator *
-__tracing_open(struct inode *inode, struct file *file)
+__tracing_open(struct inode *inode, struct file *file, bool snapshot)
 {
         long cpu_file = (long) inode->i_private;
         struct trace_iterator *iter;
@@ -2463,16 +2476,16 @@ __tracing_open(struct inode *inode, struct file *file)
         if (!iter->trace)
                 goto fail;
 
-        if (current_trace)
-                *iter->trace = *current_trace;
+        *iter->trace = *current_trace;
 
         if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
                 goto fail;
 
-        if (current_trace && current_trace->print_max)
+        if (current_trace->print_max || snapshot)
                 iter->tr = &max_tr;
         else
                 iter->tr = &global_trace;
+        iter->snapshot = snapshot;
         iter->pos = -1;
         mutex_init(&iter->mutex);
         iter->cpu_file = cpu_file;
@@ -2489,8 +2502,9 @@ __tracing_open(struct inode *inode, struct file *file)
         if (trace_clocks[trace_clock_id].in_ns)
                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
-        /* stop the trace while dumping */
-        tracing_stop();
+        /* stop the trace while dumping if we are not opening "snapshot" */
+        if (!iter->snapshot)
+                tracing_stop();
 
         if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
                 for_each_tracing_cpu(cpu) {
@@ -2553,8 +2567,9 @@ static int tracing_release(struct inode *inode, struct file *file)
         if (iter->trace && iter->trace->close)
                 iter->trace->close(iter);
 
-        /* reenable tracing if it was previously enabled */
-        tracing_start();
+        if (!iter->snapshot)
+                /* reenable tracing if it was previously enabled */
+                tracing_start();
         mutex_unlock(&trace_types_lock);
 
         mutex_destroy(&iter->mutex);
@@ -2582,7 +2597,7 @@ static int tracing_open(struct inode *inode, struct file *file)
         }
 
         if (file->f_mode & FMODE_READ) {
-                iter = __tracing_open(inode, file);
+                iter = __tracing_open(inode, file, false);
                 if (IS_ERR(iter))
                         ret = PTR_ERR(iter);
                 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
@@ -3020,10 +3035,7 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
         int r;
 
         mutex_lock(&trace_types_lock);
-        if (current_trace)
-                r = sprintf(buf, "%s\n", current_trace->name);
-        else
-                r = sprintf(buf, "\n");
+        r = sprintf(buf, "%s\n", current_trace->name);
         mutex_unlock(&trace_types_lock);
 
         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
@@ -3214,10 +3226,10 @@ static int tracing_set_tracer(const char *buf)
                 goto out;
 
         trace_branch_disable();
-        if (current_trace && current_trace->reset)
+        if (current_trace->reset)
                 current_trace->reset(tr);
 
-        had_max_tr = current_trace && current_trace->use_max_tr;
+        had_max_tr = current_trace->allocated_snapshot;
         current_trace = &nop_trace;
 
         if (had_max_tr && !t->use_max_tr) {
@@ -3236,6 +3248,8 @@ static int tracing_set_tracer(const char *buf)
                  */
                 ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
                 set_buffer_entries(&max_tr, 1);
+                tracing_reset_online_cpus(&max_tr);
+                current_trace->allocated_snapshot = false;
         }
         destroy_trace_option_files(topts);
 
@@ -3246,6 +3260,7 @@ static int tracing_set_tracer(const char *buf)
                                    RING_BUFFER_ALL_CPUS);
                 if (ret < 0)
                         goto out;
+                t->allocated_snapshot = true;
         }
 
         if (t->init) {
@@ -3353,8 +3368,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
                 ret = -ENOMEM;
                 goto fail;
         }
-        if (current_trace)
-                *iter->trace = *current_trace;
+        *iter->trace = *current_trace;
 
         if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
                 ret = -ENOMEM;
@@ -3494,7 +3508,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
                   size_t cnt, loff_t *ppos)
 {
         struct trace_iterator *iter = filp->private_data;
-        static struct tracer *old_tracer;
         ssize_t sret;
 
         /* return any leftover data */
@@ -3506,10 +3519,8 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 
         /* copy the tracer to avoid using a global lock all around */
         mutex_lock(&trace_types_lock);
-        if (unlikely(old_tracer != current_trace && current_trace)) {
-                old_tracer = current_trace;
+        if (unlikely(iter->trace->name != current_trace->name))
                 *iter->trace = *current_trace;
-        }
         mutex_unlock(&trace_types_lock);
 
         /*
@@ -3665,7 +3676,6 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
                 .ops            = &tracing_pipe_buf_ops,
                 .spd_release    = tracing_spd_release_pipe,
         };
-        static struct tracer *old_tracer;
         ssize_t ret;
         size_t rem;
         unsigned int i;
@@ -3675,10 +3685,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
         /* copy the tracer to avoid using a global lock all around */
         mutex_lock(&trace_types_lock);
-        if (unlikely(old_tracer != current_trace && current_trace)) {
-                old_tracer = current_trace;
+        if (unlikely(iter->trace->name != current_trace->name))
                 *iter->trace = *current_trace;
-        }
         mutex_unlock(&trace_types_lock);
 
         mutex_lock(&iter->mutex);
@@ -4070,6 +4078,87 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
         return single_open(file, tracing_clock_show, NULL);
 }
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+static int tracing_snapshot_open(struct inode *inode, struct file *file)
+{
+        struct trace_iterator *iter;
+        int ret = 0;
+
+        if (file->f_mode & FMODE_READ) {
+                iter = __tracing_open(inode, file, true);
+                if (IS_ERR(iter))
+                        ret = PTR_ERR(iter);
+        }
+        return ret;
+}
+
+static ssize_t
+tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+                       loff_t *ppos)
+{
+        unsigned long val;
+        int ret;
+
+        ret = tracing_update_buffers();
+        if (ret < 0)
+                return ret;
+
+        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+        if (ret)
+                return ret;
+
+        mutex_lock(&trace_types_lock);
+
+        if (current_trace->use_max_tr) {
+                ret = -EBUSY;
+                goto out;
+        }
+
+        switch (val) {
+        case 0:
+                if (current_trace->allocated_snapshot) {
+                        /* free spare buffer */
+                        ring_buffer_resize(max_tr.buffer, 1,
+                                           RING_BUFFER_ALL_CPUS);
+                        set_buffer_entries(&max_tr, 1);
+                        tracing_reset_online_cpus(&max_tr);
+                        current_trace->allocated_snapshot = false;
+                }
+                break;
+        case 1:
+                if (!current_trace->allocated_snapshot) {
+                        /* allocate spare buffer */
+                        ret = resize_buffer_duplicate_size(&max_tr,
+                                        &global_trace, RING_BUFFER_ALL_CPUS);
+                        if (ret < 0)
+                                break;
+                        current_trace->allocated_snapshot = true;
+                }
+
+                local_irq_disable();
+                /* Now, we're going to swap */
+                update_max_tr(&global_trace, current, smp_processor_id());
+                local_irq_enable();
+                break;
+        default:
+                if (current_trace->allocated_snapshot)
+                        tracing_reset_online_cpus(&max_tr);
+                else
+                        ret = -EINVAL;
+                break;
+        }
+
+        if (ret >= 0) {
+                *ppos += cnt;
+                ret = cnt;
+        }
+out:
+        mutex_unlock(&trace_types_lock);
+        return ret;
+}
+#endif /* CONFIG_TRACER_SNAPSHOT */
+
 
 static const struct file_operations tracing_max_lat_fops = {
         .open           = tracing_open_generic,
         .read           = tracing_max_lat_read,
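
The switch in tracing_snapshot_write() encodes exactly the status/input table
from the documentation hunk above. The same mapping as a C lookup table, as a
compact cross-check (illustrative only, not kernel code):

/* status x input -> action, mirroring tracing_snapshot_write() */
enum snap_action {
        DO_NOTHING, ALLOC_SWAP, ERR_EINVAL,     /* spare not allocated */
        FREE_SPARE, SWAP_BUFFERS, CLEAR_SNAP    /* spare allocated */
};

static const enum snap_action snapshot_action[2][3] = {
        /*                  input 0       input 1       else        */
        /* not allocated */ { DO_NOTHING, ALLOC_SWAP,   ERR_EINVAL },
        /* allocated     */ { FREE_SPARE, SWAP_BUFFERS, CLEAR_SNAP },
};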
@@ -4126,6 +4215,16 @@ static const struct file_operations trace_clock_fops = {
         .write          = tracing_clock_write,
 };
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+static const struct file_operations snapshot_fops = {
+        .open           = tracing_snapshot_open,
+        .read           = seq_read,
+        .write          = tracing_snapshot_write,
+        .llseek         = tracing_seek,
+        .release        = tracing_release,
+};
+#endif /* CONFIG_TRACER_SNAPSHOT */
+
 struct ftrace_buffer_info {
         struct trace_array      *tr;
         void                    *spare;
@@ -4430,6 +4529,9 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
         cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
         trace_seq_printf(s, "dropped events: %ld\n", cnt);
 
+        cnt = ring_buffer_read_events_cpu(tr->buffer, cpu);
+        trace_seq_printf(s, "read events: %ld\n", cnt);
+
         count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
 
         kfree(s);
@@ -4506,7 +4608,7 @@ struct dentry *tracing_init_dentry(void)
 
 static struct dentry *d_percpu;
 
-struct dentry *tracing_dentry_percpu(void)
+static struct dentry *tracing_dentry_percpu(void)
 {
         static int once;
         struct dentry *d_tracer;
@@ -4922,6 +5024,11 @@ static __init int tracer_init_debugfs(void)
                            &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
 #endif
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+        trace_create_file("snapshot", 0644, d_tracer,
+                          (void *) TRACE_PIPE_ALL_CPU, &snapshot_fops);
+#endif
+
         create_trace_options_dir();
 
         for_each_tracing_cpu(cpu)
@@ -5030,6 +5137,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
         if (disable_tracing)
                 ftrace_kill();
 
+        /* Simulate the iterator */
         trace_init_global_iter(&iter);
 
         for_each_tracing_cpu(cpu) {
@@ -5041,10 +5149,6 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
         /* don't look at user memory in panic mode */
         trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
-        /* Simulate the iterator */
-        iter.tr = &global_trace;
-        iter.trace = current_trace;
-
         switch (oops_dump_mode) {
         case DUMP_ALL:
                 iter.cpu_file = TRACE_PIPE_ALL_CPU;
@@ -5189,7 +5293,7 @@ __init static int tracer_alloc_buffers(void)
         init_irq_work(&trace_work_wakeup, trace_wake_up);
 
         register_tracer(&nop_trace);
-        current_trace = &nop_trace;
         /* All seems OK, enable tracing */
         tracing_disabled = 0;
 
kernel/trace/trace.h:

@@ -287,6 +287,7 @@ struct tracer {
         struct tracer_flags     *flags;
         bool                    print_max;
         bool                    use_max_tr;
+        bool                    allocated_snapshot;
 };
 
kernel/trace/trace_clock.c:

@@ -84,7 +84,7 @@ u64 notrace trace_clock_global(void)
         local_irq_save(flags);
 
         this_cpu = raw_smp_processor_id();
-        now = cpu_clock(this_cpu);
+        now = sched_clock_cpu(this_cpu);
         /*
          * If in an NMI context then dont risk lockups and return the
          * cpu_clock() time:
kernel/trace/trace_functions_graph.c:

@@ -191,10 +191,16 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 
         ftrace_pop_return_trace(&trace, &ret, frame_pointer);
         trace.rettime = trace_clock_local();
-        ftrace_graph_return(&trace);
         barrier();
         current->curr_ret_stack--;
 
+        /*
+         * The trace should run after decrementing the ret counter
+         * in case an interrupt were to come in. We don't want to
+         * lose the interrupt if max_depth is set.
+         */
+        ftrace_graph_return(&trace);
+
         if (unlikely(!ret)) {
                 ftrace_graph_stop();
                 WARN_ON(1);
samples/Kconfig:

@@ -5,12 +5,6 @@ menuconfig SAMPLES
 
 if SAMPLES
 
-config SAMPLE_TRACEPOINTS
-        tristate "Build tracepoints examples -- loadable modules only"
-        depends on TRACEPOINTS && m
-        help
-          This build tracepoints example modules.
-
 config SAMPLE_TRACE_EVENTS
         tristate "Build trace_events examples -- loadable modules only"
         depends on EVENT_TRACING && m
samples/Makefile:

@@ -1,4 +1,4 @@
 # Makefile for Linux samples code
 
-obj-$(CONFIG_SAMPLES)   += kobject/ kprobes/ tracepoints/ trace_events/ \
+obj-$(CONFIG_SAMPLES)   += kobject/ kprobes/ trace_events/ \
                            hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/
samples/tracepoints/Makefile (deleted):

@@ -1,6 +0,0 @@
-# builds the tracepoint example kernel modules;
-# then to use one (as root): insmod <module_name.ko>
-
-obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-sample.o
-obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-probe-sample.o
-obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-probe-sample2.o
samples/tracepoints/tp-samples-trace.h (deleted):

@@ -1,11 +0,0 @@
-#ifndef _TP_SAMPLES_TRACE_H
-#define _TP_SAMPLES_TRACE_H
-
-#include <linux/proc_fs.h>      /* for struct inode and struct file */
-#include <linux/tracepoint.h>
-
-DECLARE_TRACE(subsys_event,
-        TP_PROTO(struct inode *inode, struct file *file),
-        TP_ARGS(inode, file));
-DECLARE_TRACE_NOARGS(subsys_eventb);
-#endif
samples/tracepoints/tracepoint-probe-sample.c (deleted):

@@ -1,57 +0,0 @@
-/*
- * tracepoint-probe-sample.c
- *
- * sample tracepoint probes.
- */
-
-#include <linux/module.h>
-#include <linux/file.h>
-#include <linux/dcache.h>
-#include "tp-samples-trace.h"
-
-/*
- * Here the caller only guarantees locking for struct file and struct inode.
- * Locking must therefore be done in the probe to use the dentry.
- */
-static void probe_subsys_event(void *ignore,
-                               struct inode *inode, struct file *file)
-{
-        path_get(&file->f_path);
-        dget(file->f_path.dentry);
-        printk(KERN_INFO "Event is encountered with filename %s\n",
-                file->f_path.dentry->d_name.name);
-        dput(file->f_path.dentry);
-        path_put(&file->f_path);
-}
-
-static void probe_subsys_eventb(void *ignore)
-{
-        printk(KERN_INFO "Event B is encountered\n");
-}
-
-static int __init tp_sample_trace_init(void)
-{
-        int ret;
-
-        ret = register_trace_subsys_event(probe_subsys_event, NULL);
-        WARN_ON(ret);
-        ret = register_trace_subsys_eventb(probe_subsys_eventb, NULL);
-        WARN_ON(ret);
-
-        return 0;
-}
-
-module_init(tp_sample_trace_init);
-
-static void __exit tp_sample_trace_exit(void)
-{
-        unregister_trace_subsys_eventb(probe_subsys_eventb, NULL);
-        unregister_trace_subsys_event(probe_subsys_event, NULL);
-        tracepoint_synchronize_unregister();
-}
-
-module_exit(tp_sample_trace_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mathieu Desnoyers");
-MODULE_DESCRIPTION("Tracepoint Probes Samples");
samples/tracepoints/tracepoint-probe-sample2.c (deleted):

@@ -1,44 +0,0 @@
-/*
- * tracepoint-probe-sample2.c
- *
- * 2nd sample tracepoint probes.
- */
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include "tp-samples-trace.h"
-
-/*
- * Here the caller only guarantees locking for struct file and struct inode.
- * Locking must therefore be done in the probe to use the dentry.
- */
-static void probe_subsys_event(void *ignore,
-                               struct inode *inode, struct file *file)
-{
-        printk(KERN_INFO "Event is encountered with inode number %lu\n",
-                inode->i_ino);
-}
-
-static int __init tp_sample_trace_init(void)
-{
-        int ret;
-
-        ret = register_trace_subsys_event(probe_subsys_event, NULL);
-        WARN_ON(ret);
-
-        return 0;
-}
-
-module_init(tp_sample_trace_init);
-
-static void __exit tp_sample_trace_exit(void)
-{
-        unregister_trace_subsys_event(probe_subsys_event, NULL);
-        tracepoint_synchronize_unregister();
-}
-
-module_exit(tp_sample_trace_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mathieu Desnoyers");
-MODULE_DESCRIPTION("Tracepoint Probes Samples");
samples/tracepoints/tracepoint-sample.c (deleted):

@@ -1,57 +0,0 @@
-/* tracepoint-sample.c
- *
- * Executes a tracepoint when /proc/tracepoint-sample is opened.
- *
- * (C) Copyright 2007 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
- *
- * This file is released under the GPLv2.
- * See the file COPYING for more details.
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/proc_fs.h>
-#include "tp-samples-trace.h"
-
-DEFINE_TRACE(subsys_event);
-DEFINE_TRACE(subsys_eventb);
-
-struct proc_dir_entry *pentry_sample;
-
-static int my_open(struct inode *inode, struct file *file)
-{
-        int i;
-
-        trace_subsys_event(inode, file);
-        for (i = 0; i < 10; i++)
-                trace_subsys_eventb();
-        return -EPERM;
-}
-
-static const struct file_operations mark_ops = {
-        .open = my_open,
-        .llseek = noop_llseek,
-};
-
-static int __init sample_init(void)
-{
-        printk(KERN_ALERT "sample init\n");
-        pentry_sample = proc_create("tracepoint-sample", 0444, NULL,
-                &mark_ops);
-        if (!pentry_sample)
-                return -EPERM;
-        return 0;
-}
-
-static void __exit sample_exit(void)
-{
-        printk(KERN_ALERT "sample exit\n");
-        remove_proc_entry("tracepoint-sample", NULL);
-}
-
-module_init(sample_init)
-module_exit(sample_exit)
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mathieu Desnoyers");
-MODULE_DESCRIPTION("Tracepoint sample");