Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (163 commits)
  tracing: Fix compile issue for trace_sched_wakeup.c
  [S390] hardirq: remove pointless header file includes
  [IA64] Move local_softirq_pending() definition
  perf, powerpc: Fix power_pmu_event_init to not use event->ctx
  ftrace: Remove recursion between recordmcount and scripts/mod/empty
  jump_label: Add COND_STMT(), reducer wrappery
  perf: Optimize sw events
  perf: Use jump_labels to optimize the scheduler hooks
  jump_label: Add atomic_t interface
  jump_label: Use more consistent naming
  perf, hw_breakpoint: Fix crash in hw_breakpoint creation
  perf: Find task before event alloc
  perf: Fix task refcount bugs
  perf: Fix group moving
  irq_work: Add generic hardirq context callbacks
  perf_events: Fix transaction recovery in group_sched_in()
  perf_events: Fix bogus AMD64 generic TLB events
  perf_events: Fix bogus context time tracking
  tracing: Remove parent recording in latency tracer graph options
  tracing: Use one prologue for the preempt irqs off tracer function tracers
  ...
This commit is contained in:
@@ -2606,6 +2606,19 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
|
||||
|
||||
/*
|
||||
* The total entries in the ring buffer is the running counter
|
||||
* of entries entered into the ring buffer, minus the sum of
|
||||
* the entries read from the ring buffer and the number of
|
||||
* entries that were overwritten.
|
||||
*/
|
||||
static inline unsigned long
|
||||
rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
|
||||
{
|
||||
return local_read(&cpu_buffer->entries) -
|
||||
(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
|
||||
}
|
||||
|
||||
/**
 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
 * @buffer: The ring buffer
@@ -2614,16 +2627,13 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
|
||||
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
|
||||
{
|
||||
struct ring_buffer_per_cpu *cpu_buffer;
|
||||
unsigned long ret;
|
||||
|
||||
if (!cpumask_test_cpu(cpu, buffer->cpumask))
|
||||
return 0;
|
||||
|
||||
cpu_buffer = buffer->buffers[cpu];
|
||||
ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun))
|
||||
- cpu_buffer->read;
|
||||
|
||||
return ret;
|
||||
return rb_num_of_entries(cpu_buffer);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
|
||||
|
||||
@@ -2684,8 +2694,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
|
||||
/* if you care about this being correct, lock the buffer */
|
||||
for_each_buffer_cpu(buffer, cpu) {
|
||||
cpu_buffer = buffer->buffers[cpu];
|
||||
entries += (local_read(&cpu_buffer->entries) -
|
||||
local_read(&cpu_buffer->overrun)) - cpu_buffer->read;
|
||||
entries += rb_num_of_entries(cpu_buffer);
|
||||
}
|
||||
|
||||
return entries;
|
||||
|
Reference in New Issue
Block a user