Merge branch 'oprofile-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'oprofile-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (24 commits)
  oprofile/x86: make AMD IBS hotplug capable
  oprofile/x86: notify cpus only when daemon is running
  oprofile/x86: reordering some functions
  oprofile/x86: stop disabled counters in nmi handler
  oprofile/x86: protect cpu hotplug sections
  oprofile/x86: remove CONFIG_SMP macros
  oprofile/x86: fix uninitialized counter usage during cpu hotplug
  oprofile/x86: remove duplicate IBS capability check
  oprofile/x86: move IBS code
  oprofile/x86: return -EBUSY if counters are already reserved
  oprofile/x86: moving shutdown functions
  oprofile/x86: reserve counter msrs pairwise
  oprofile/x86: rework error handler in nmi_setup()
  oprofile: update file list in MAINTAINERS file
  oprofile: protect from not being in an IRQ context
  oprofile: remove double ring buffering
  ring-buffer: Add lost event count to end of sub buffer
  tracing: Show the lost events in the trace_pipe output
  ring-buffer: Add place holder recording of dropped events
  tracing: Fix compile error in module tracepoints when MODULE_UNLOAD not set
  ...
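The tracing half of this series widens the ring-buffer read API so a reader can learn how many events the writer overwrote since the last read. A minimal consumer sketch against the new signatures (the drain_one_cpu() helper and its pr_info() reporting are illustrative only, not part of this merge):

#include <linux/kernel.h>
#include <linux/ring_buffer.h>

/* Illustrative: drain one CPU's buffer and report any dropped events. */
static void drain_one_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        unsigned long lost_events = 0;
        u64 ts;

        /* The fourth argument is new in this merge; pass NULL to ignore it. */
        while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost_events))) {
                void *data = ring_buffer_event_data(event);

                if (lost_events)
                        pr_info("cpu %d: %lu events lost before this one\n",
                                cpu, lost_events);
                /* ... process data ... */
                (void)data;
        }
}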
@@ -59,8 +59,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

EXPORT_TRACEPOINT_SYMBOL(module_get);

#if 0
#define DEBUGP printk
#else
@@ -515,6 +513,9 @@ MODINFO_ATTR(srcversion);
static char last_unloaded_module[MODULE_NAME_LEN+1];

#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* Init the unload section of the module. */
static void module_unload_init(struct module *mod)
{
@@ -867,8 +868,7 @@ void module_put(struct module *module)
smp_wmb(); /* see comment in module_refcount */
__this_cpu_inc(module->refptr->decs);

trace_module_put(module, _RET_IP_,
__this_cpu_read(module->refptr->decs));
trace_module_put(module, _RET_IP_);
/* Maybe they're waiting for us to drop reference? */
if (unlikely(!module_is_live(module)))
wake_up_process(module->waiter);
@@ -319,6 +319,11 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
#define TS_MASK ((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST (~TS_MASK)

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS (1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED (1 << 30)

struct buffer_data_page {
u64 time_stamp; /* page time stamp */
local_t commit; /* write committed index */
@@ -338,6 +343,7 @@ struct buffer_page {
local_t write; /* index for next write */
unsigned read; /* index for next read */
local_t entries; /* entries on this page */
unsigned long real_end; /* real end of data */
struct buffer_data_page *page; /* Actual data page */
};

@@ -417,6 +423,12 @@ int ring_buffer_print_page_header(struct trace_seq *s)
(unsigned int)sizeof(field.commit),
(unsigned int)is_signed_type(long));

ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
"offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), commit),
1,
(unsigned int)is_signed_type(long));

ret = trace_seq_printf(s, "\tfield: char data;\t"
"offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), data),
@@ -440,6 +452,8 @@ struct ring_buffer_per_cpu {
struct buffer_page *tail_page; /* write to tail */
struct buffer_page *commit_page; /* committed pages */
struct buffer_page *reader_page;
unsigned long lost_events;
unsigned long last_overrun;
local_t commit_overrun;
local_t overrun;
local_t entries;
@@ -1761,6 +1775,13 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
event = __rb_page_index(tail_page, tail);
kmemcheck_annotate_bitfield(event, bitfield);

/*
* Save the original length to the meta data.
* This will be used by the reader to add lost event
* counter.
*/
tail_page->real_end = tail;

/*
* If this event is bigger than the minimum size, then
* we need to be careful that we don't subtract the
@@ -2838,6 +2859,7 @@ static struct buffer_page *
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
struct buffer_page *reader = NULL;
unsigned long overwrite;
unsigned long flags;
int nr_loops = 0;
int ret;
@@ -2879,6 +2901,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
local_set(&cpu_buffer->reader_page->write, 0);
local_set(&cpu_buffer->reader_page->entries, 0);
local_set(&cpu_buffer->reader_page->page->commit, 0);
cpu_buffer->reader_page->real_end = 0;

spin:
/*
@@ -2898,6 +2921,18 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
/* The reader page will be pointing to the new head */
rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);

/*
* We want to make sure we read the overruns after we set up our
* pointers to the next object. The writer side does a
* cmpxchg to cross pages which acts as the mb on the writer
* side. Note, the reader will constantly fail the swap
* while the writer is updating the pointers, so this
* guarantees that the overwrite recorded here is the one we
* want to compare with the last_overrun.
*/
smp_mb();
overwrite = local_read(&(cpu_buffer->overrun));

/*
* Here's the tricky part.
*
@@ -2929,6 +2964,11 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->reader_page = reader;
rb_reset_reader_page(cpu_buffer);

if (overwrite != cpu_buffer->last_overrun) {
cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
cpu_buffer->last_overrun = overwrite;
}

goto again;

out:
@@ -3005,8 +3045,14 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
rb_advance_iter(iter);
}

static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
{
return cpu_buffer->lost_events;
}

static struct ring_buffer_event *
rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
unsigned long *lost_events)
{
struct ring_buffer_event *event;
struct buffer_page *reader;
@@ -3058,6 +3104,8 @@ rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
cpu_buffer->cpu, ts);
}
if (lost_events)
*lost_events = rb_lost_events(cpu_buffer);
return event;

default:
@@ -3168,12 +3216,14 @@ static inline int rb_ok_to_lock(void)
* @buffer: The ring buffer to read
* @cpu: The cpu to peak at
* @ts: The timestamp counter of this event.
* @lost_events: a variable to store if events were lost (may be NULL)
*
* This will return the event that will be read next, but does
* not consume the data.
*/
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct ring_buffer_event *event;
@@ -3188,7 +3238,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
local_irq_save(flags);
if (dolock)
spin_lock(&cpu_buffer->reader_lock);
event = rb_buffer_peek(cpu_buffer, ts);
event = rb_buffer_peek(cpu_buffer, ts, lost_events);
if (event && event->type_len == RINGBUF_TYPE_PADDING)
rb_advance_reader(cpu_buffer);
if (dolock)
@@ -3230,13 +3280,17 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
/**
* ring_buffer_consume - return an event and consume it
* @buffer: The ring buffer to get the next event from
* @cpu: the cpu to read the buffer from
* @ts: a variable to store the timestamp (may be NULL)
* @lost_events: a variable to store if events were lost (may be NULL)
*
* Returns the next event in the ring buffer, and that event is consumed.
* Meaning, that sequential reads will keep returning a different event,
* and eventually empty the ring buffer if the producer is slower.
*/
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event = NULL;
@@ -3257,9 +3311,11 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
if (dolock)
spin_lock(&cpu_buffer->reader_lock);

event = rb_buffer_peek(cpu_buffer, ts);
if (event)
event = rb_buffer_peek(cpu_buffer, ts, lost_events);
if (event) {
cpu_buffer->lost_events = 0;
rb_advance_reader(cpu_buffer);
}

if (dolock)
spin_unlock(&cpu_buffer->reader_lock);
@@ -3408,6 +3464,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->write_stamp = 0;
cpu_buffer->read_stamp = 0;

cpu_buffer->lost_events = 0;
cpu_buffer->last_overrun = 0;

rb_head_page_activate(cpu_buffer);
}

@@ -3683,6 +3742,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
struct ring_buffer_event *event;
struct buffer_data_page *bpage;
struct buffer_page *reader;
unsigned long missed_events;
unsigned long flags;
unsigned int commit;
unsigned int read;
@@ -3719,6 +3779,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
read = reader->read;
commit = rb_page_commit(reader);

/* Check if any events were dropped */
missed_events = cpu_buffer->lost_events;

/*
* If this page has been partially read or
* if len is not big enough to read the rest of the page or
@@ -3779,9 +3842,35 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
local_set(&reader->entries, 0);
reader->read = 0;
*data_page = bpage;

/*
* Use the real_end for the data size,
* This gives us a chance to store the lost events
* on the page.
*/
if (reader->real_end)
local_set(&bpage->commit, reader->real_end);
}
ret = read;

cpu_buffer->lost_events = 0;
/*
* Set a flag in the commit field if we lost events
*/
if (missed_events) {
commit = local_read(&bpage->commit);

/* If there is room at the end of the page to save the
* missed events, then record it there.
*/
if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
memcpy(&bpage->data[commit], &missed_events,
sizeof(missed_events));
local_add(RB_MISSED_STORED, &bpage->commit);
}
local_add(RB_MISSED_EVENTS, &bpage->commit);
}

out_unlock:
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

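The ring_buffer_read_page() hunk above encodes loss information in the high bits of the sub-buffer's commit field: RB_MISSED_EVENTS (bit 31) flags that events were dropped before this page, and RB_MISSED_STORED (bit 30) flags that the dropped-event count was appended just past the committed data. A hedged sketch of how a consumer of such a page could decode it (the helper name is made up; it assumes only the buffer_data_page layout shown above):

/* Illustrative decode of a page filled by ring_buffer_read_page(). */
static unsigned long page_missed_events(struct buffer_data_page *bpage)
{
        unsigned long commit = local_read(&bpage->commit);
        unsigned long missed = 0;

        if (!(commit & RB_MISSED_EVENTS))
                return 0;               /* nothing was dropped */

        if (commit & RB_MISSED_STORED) {
                /* Low bits are the data length; the count sits right after it. */
                unsigned long len = commit & ~(RB_MISSED_EVENTS | RB_MISSED_STORED);

                memcpy(&missed, &bpage->data[len], sizeof(missed));
        }

        return missed;  /* zero here means "dropped, but count not stored" */
}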
@@ -81,7 +81,7 @@ static enum event_status read_event(int cpu)
int *entry;
u64 ts;

event = ring_buffer_consume(buffer, cpu, &ts);
event = ring_buffer_consume(buffer, cpu, &ts, NULL);
if (!event)
return EVENT_DROPPED;

@@ -1545,7 +1545,8 @@ static void trace_iterator_increment(struct trace_iterator *iter)
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
unsigned long *lost_events)
{
struct ring_buffer_event *event;
struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
@@ -1556,7 +1557,8 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
if (buf_iter)
event = ring_buffer_iter_peek(buf_iter, ts);
else
event = ring_buffer_peek(iter->tr->buffer, cpu, ts);
event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
lost_events);

ftrace_enable_cpu();

@@ -1564,10 +1566,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
unsigned long *missing_events, u64 *ent_ts)
{
struct ring_buffer *buffer = iter->tr->buffer;
struct trace_entry *ent, *next = NULL;
unsigned long lost_events, next_lost = 0;
int cpu_file = iter->cpu_file;
u64 next_ts = 0, ts;
int next_cpu = -1;
@@ -1580,7 +1584,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
if (cpu_file > TRACE_PIPE_ALL_CPU) {
if (ring_buffer_empty_cpu(buffer, cpu_file))
return NULL;
ent = peek_next_entry(iter, cpu_file, ent_ts);
ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
if (ent_cpu)
*ent_cpu = cpu_file;

@@ -1592,7 +1596,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
if (ring_buffer_empty_cpu(buffer, cpu))
continue;

ent = peek_next_entry(iter, cpu, &ts);
ent = peek_next_entry(iter, cpu, &ts, &lost_events);

/*
* Pick the entry with the smallest timestamp:
@@ -1601,6 +1605,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
next = ent;
next_cpu = cpu;
next_ts = ts;
next_lost = lost_events;
}
}

@@ -1610,6 +1615,9 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
if (ent_ts)
*ent_ts = next_ts;

if (missing_events)
*missing_events = next_lost;

return next;
}

@@ -1617,13 +1625,14 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
int *ent_cpu, u64 *ent_ts)
{
return __find_next_entry(iter, ent_cpu, ent_ts);
return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
static void *find_next_entry_inc(struct trace_iterator *iter)
{
iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
iter->ent = __find_next_entry(iter, &iter->cpu,
&iter->lost_events, &iter->ts);

if (iter->ent)
trace_iterator_increment(iter);
@@ -1635,7 +1644,8 @@ static void trace_consume(struct trace_iterator *iter)
{
/* Don't allow ftrace to trace into the ring buffers */
ftrace_disable_cpu();
ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
&iter->lost_events);
ftrace_enable_cpu();
}

@@ -2030,6 +2040,10 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
{
enum print_line_t ret;

if (iter->lost_events)
trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
iter->cpu, iter->lost_events);

if (iter->trace && iter->trace->print_line) {
ret = iter->trace->print_line(iter);
if (ret != TRACE_TYPE_UNHANDLED)
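The print_trace_line() hunk above is what makes the drops visible to userspace: whenever iter->lost_events is non-zero, the next record read from trace_pipe is preceded by a marker built from the format string shown above, along the lines of (CPU number and count are placeholders):

CPU:1 [LOST 42 EVENTS]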
@@ -490,9 +490,10 @@ get_return_for_leaf(struct trace_iterator *iter,
* We need to consume the current entry to see
* the next one.
*/
ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
ring_buffer_consume(iter->tr->buffer, iter->cpu,
NULL, NULL);
event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
NULL);
NULL, NULL);
}

if (!event)
@@ -30,7 +30,7 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
struct trace_entry *entry;
unsigned int loops = 0;

while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
entry = ring_buffer_event_data(event);

/*