Merge branch 'perf/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing into perf/urgent
@@ -1702,7 +1702,6 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	return entry;
 }
 
-#ifdef CONFIG_EVENT_TRACING
 void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 {
 	regs->ip = ip;
@@ -1714,4 +1713,3 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 	regs->cs = __KERNEL_CS;
 	local_save_flags(regs->flags);
 }
-#endif
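The two hunks above drop the CONFIG_EVENT_TRACING guard around perf_arch_fetch_caller_regs(): the perf_sw_event() inline added further down calls perf_fetch_caller_regs() from a header that any configuration may instantiate, so the arch helper must now be built unconditionally. A minimal sketch of the linkage problem the removal avoids (file names and identifiers invented for illustration):

/* header.h (sketch): an inline that any translation unit may instantiate */
void arch_fetch_regs(unsigned long *ip);        /* arch-provided helper */

static inline void fetch_caller_regs(unsigned long *ip)
{
        arch_fetch_regs(ip);    /* emits a reference wherever it's inlined */
}

/* arch.c (sketch): had this definition stayed under a config #ifdef, any
 * build without that option that used fetch_caller_regs() would fail to
 * link with an undefined reference. */
void arch_fetch_regs(unsigned long *ip)
{
        *ip = (unsigned long)__builtin_return_address(0);
}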
@@ -618,8 +618,8 @@ int kgdb_arch_init(void)
 	 * portion of kgdb because this operation requires mutexs to
 	 * complete.
 	 */
+	hw_breakpoint_init(&attr);
 	attr.bp_addr = (unsigned long)kgdb_arch_init;
-	attr.type = PERF_TYPE_BREAKPOINT;
 	attr.bp_len = HW_BREAKPOINT_LEN_1;
 	attr.bp_type = HW_BREAKPOINT_W;
 	attr.disabled = 1;
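Calling hw_breakpoint_init() instead of open-coding the attribute makes the explicit PERF_TYPE_BREAKPOINT assignment redundant and, more importantly, starts from a fully initialized attr. A sketch of what such an initializer does (the real helper lives in include/linux/hw_breakpoint.h; the exact body below is an assumption for illustration):

#include <linux/perf_event.h>
#include <linux/string.h>

/* Assumed sketch of an hw_breakpoint_init()-style helper; the kernel's
 * actual implementation is in include/linux/hw_breakpoint.h. */
static inline void example_bp_attr_init(struct perf_event_attr *attr)
{
        memset(attr, 0, sizeof(*attr));         /* no stale fields */
        attr->type = PERF_TYPE_BREAKPOINT;      /* covers the line removed above */
        attr->size = sizeof(*attr);             /* lets the attr ABI grow safely */
        attr->pinned = 1;                       /* a breakpoint must stay scheduled */
        attr->sample_period = 1;                /* report every hit */
}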
@@ -842,13 +842,6 @@ extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
 
-static inline void
-perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
-{
-	if (atomic_read(&perf_swevent_enabled[event_id]))
-		__perf_sw_event(event_id, nr, nmi, regs, addr);
-}
-
 extern void
 perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
 
@@ -887,6 +880,20 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
 	return perf_arch_fetch_caller_regs(regs, ip, skip);
 }
 
+static inline void
+perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+{
+	if (atomic_read(&perf_swevent_enabled[event_id])) {
+		struct pt_regs hot_regs;
+
+		if (!regs) {
+			perf_fetch_caller_regs(&hot_regs, 1);
+			regs = &hot_regs;
+		}
+		__perf_sw_event(event_id, nr, nmi, regs, addr);
+	}
+}
+
 extern void __perf_event_mmap(struct vm_area_struct *vma);
 
 static inline void perf_event_mmap(struct vm_area_struct *vma)
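The relocated perf_sw_event() is the point of the change: registers are now captured "hot" at the call site, on demand. When the caller passes NULL regs, a pt_regs snapshot is taken on the local stack via perf_fetch_caller_regs() only if the event is actually enabled; the definition had to move below perf_fetch_caller_regs() so the inline can see it. The same lazy-capture idiom in self-contained C (all names invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct regs_snapshot { unsigned long ip; };

static bool event_enabled;      /* stands in for perf_swevent_enabled[] */

static void capture_regs(struct regs_snapshot *r)
{
        /* stands in for perf_fetch_caller_regs(): record the caller's IP */
        r->ip = (unsigned long)__builtin_return_address(0);
}

static void emit_event(const struct regs_snapshot *regs)
{
        printf("event at ip=%#lx\n", regs->ip);
}

/* Lazy capture: the snapshot costs nothing unless the event is enabled
 * and the caller did not already have registers at hand. */
static inline void sw_event(struct regs_snapshot *regs)
{
        if (event_enabled) {
                struct regs_snapshot hot;

                if (!regs) {
                        capture_regs(&hot);
                        regs = &hot;
                }
                emit_event(regs);
        }
}

int main(void)
{
        sw_event(NULL);         /* disabled: no snapshot is taken */
        event_enabled = true;
        sw_event(NULL);         /* enabled: snapshot taken here, on demand */
        return 0;
}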
@@ -1164,11 +1164,9 @@ void perf_event_task_sched_out(struct task_struct *task,
 	struct perf_event_context *ctx = task->perf_event_ctxp;
 	struct perf_event_context *next_ctx;
 	struct perf_event_context *parent;
-	struct pt_regs *regs;
 	int do_switch = 1;
 
-	regs = task_pt_regs(task);
-	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
+	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
 
 	if (likely(!ctx || !cpuctx->task_ctx))
 		return;
@@ -17,7 +17,12 @@ EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
 static char *perf_trace_buf;
 static char *perf_trace_buf_nmi;
 
-typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ;
+/*
+ * Force it to be aligned to unsigned long to avoid misaligned accesses
+ * suprises
+ */
+typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
+	perf_trace_t;
 
 /* Count the events in use (per event id, not per instance) */
 static int total_ref_count;
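The typedef swap does not change the buffer's size, which stays at PERF_MAX_TRACE_SIZE bytes; it only raises the type's natural alignment from 1 (array of char) to that of unsigned long, so the per-cpu trace buffers no longer start at arbitrary byte offsets. A small user-space check of both properties (the PERF_MAX_TRACE_SIZE stand-in value is an assumption; __alignof__ is the GCC extension):

#include <stdio.h>
#include <stddef.h>

#define MAX_TRACE_SIZE 2048     /* stand-in for PERF_MAX_TRACE_SIZE */

typedef char trace_bytes_t[MAX_TRACE_SIZE];                     /* old: byte aligned */
typedef unsigned long
        trace_longs_t[MAX_TRACE_SIZE / sizeof(unsigned long)];  /* new */

int main(void)
{
        /* identical storage size... */
        printf("sizeof:  %zu vs %zu\n",
               sizeof(trace_bytes_t), sizeof(trace_longs_t));
        /* ...but the new type is naturally aligned to unsigned long */
        printf("alignof: %zu vs %zu\n",
               (size_t)__alignof__(trace_bytes_t),
               (size_t)__alignof__(trace_longs_t));
        return 0;
}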
@@ -130,6 +135,8 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 	char *trace_buf, *raw_data;
 	int pc, cpu;
 
+	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
+
 	pc = preempt_count();
 
 	/* Protect the per cpu buffer, begin the rcu read side */
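The new BUILD_BUG_ON() pins down the assumption the typedef relies on: PERF_MAX_TRACE_SIZE must divide evenly by sizeof(unsigned long), or the array-length division above would silently shrink the buffer. One classic way such a compile-time assertion is built (the kernel's own definition lives in include/linux/kernel.h and has varied across versions):

/* Classic compile-time assertion idiom: a true condition produces a
 * negative array size, which is a hard compile error. */
#define BUILD_BUG_ON_SKETCH(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

#define MAX_TRACE_SIZE 2048     /* stand-in for PERF_MAX_TRACE_SIZE */

int main(void)
{
        BUILD_BUG_ON_SKETCH(MAX_TRACE_SIZE % sizeof(unsigned long)); /* passes */
        /* BUILD_BUG_ON_SKETCH(MAX_TRACE_SIZE % 7); would not compile */
        return 0;
}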
@@ -152,7 +159,7 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 	raw_data = per_cpu_ptr(trace_buf, cpu);
 
 	/* zero the dead bytes from align to not leak stack to user */
-	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
 
 	entry = (struct trace_entry *)raw_data;
 	tracing_generic_entry_update(entry, *irq_flags, pc);
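The final hunk follows from the alignment change: the buffer is now only guaranteed unsigned-long alignment, so on 32-bit targets raw_data + size - sizeof(u64) may be merely 4-byte aligned, and the old direct u64 store there is an unaligned access on strict architectures. A byte-wise memset() of the same eight bytes is safe at any alignment. Roughly, as a standalone helper:

#include <string.h>

/* Zero the u64-sized tail padding without assuming 8-byte alignment;
 * memset works byte-wise, unlike the former *(u64 *)... = 0ULL store. */
static void zero_tail_padding(char *raw_data, size_t size)
{
        memset(&raw_data[size - sizeof(unsigned long long)], 0,
               sizeof(unsigned long long));
}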