Merge remote branch 'tip/perf/core' into oprofile/core
Conflicts:
	arch/arm/oprofile/common.c
	kernel/perf_event.c
@@ -529,7 +529,6 @@ struct hw_perf_event {
int last_cpu;
};
struct { /* software */
s64 remaining;
struct hrtimer hrtimer;
};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
@@ -539,6 +538,7 @@ struct hw_perf_event {
};
#endif
};
int state;
local64_t prev_count;
u64 sample_period;
u64 last_period;
@@ -550,6 +550,13 @@ struct hw_perf_event {
#endif
};

/*
* hw_perf_event::state flags
*/
#define PERF_HES_STOPPED 0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
#define PERF_HES_ARCH 0x04

struct perf_event;

/*
@@ -561,36 +568,70 @@ struct perf_event;
* struct pmu - generic performance monitoring unit
*/
struct pmu {
int (*enable) (struct perf_event *event);
void (*disable) (struct perf_event *event);
int (*start) (struct perf_event *event);
void (*stop) (struct perf_event *event);
struct list_head entry;

int * __percpu pmu_disable_count;
struct perf_cpu_context * __percpu pmu_cpu_context;
int task_ctx_nr;

/*
* Fully disable/enable this PMU, can be used to protect from the PMI
* as well as for lazy/batch writing of the MSRs.
*/
void (*pmu_enable) (struct pmu *pmu); /* optional */
void (*pmu_disable) (struct pmu *pmu); /* optional */

/*
* Try and initialize the event for this PMU.
* Should return -ENOENT when the @event doesn't match this PMU.
*/
int (*event_init) (struct perf_event *event);

#define PERF_EF_START 0x01 /* start the counter when adding */
#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */

/*
* Adds/Removes a counter to/from the PMU, can be done inside
* a transaction, see the ->*_txn() methods.
*/
int (*add) (struct perf_event *event, int flags);
void (*del) (struct perf_event *event, int flags);

/*
* Starts/Stops a counter present on the PMU. The PMI handler
* should stop the counter when perf_event_overflow() returns
* !0. ->start() will be used to continue.
*/
void (*start) (struct perf_event *event, int flags);
void (*stop) (struct perf_event *event, int flags);

/*
* Updates the counter value of the event.
*/
void (*read) (struct perf_event *event);
void (*unthrottle) (struct perf_event *event);

/*
* Group events scheduling is treated as a transaction, add group
* events as a whole and perform one schedulability test. If the test
* fails, roll back the whole group
* Group events scheduling is treated as a transaction, add
* group events as a whole and perform one schedulability test.
* If the test fails, roll back the whole group
*
* Start the transaction, after this ->add() doesn't need to
* do schedulability tests.
*/

void (*start_txn) (struct pmu *pmu); /* optional */
/*
* Start the transaction, after this ->enable() doesn't need
* to do schedulability tests.
*/
void (*start_txn) (const struct pmu *pmu);
/*
* If ->start_txn() disabled the ->enable() schedulability test
* If ->start_txn() disabled the ->add() schedulability test
* then ->commit_txn() is required to perform one. On success
* the transaction is closed. On error the transaction is kept
* open until ->cancel_txn() is called.
*/
int (*commit_txn) (const struct pmu *pmu);
int (*commit_txn) (struct pmu *pmu); /* optional */
/*
* Will cancel the transaction, assumes ->disable() is called for
* each successful ->enable() during the transaction.
* Will cancel the transaction, assumes ->del() is called
* for each successful ->add() during the transaction.
*/
void (*cancel_txn) (const struct pmu *pmu);
void (*cancel_txn) (struct pmu *pmu); /* optional */
};
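Note (illustrative, not part of this commit): the hunk above reads more easily with a concrete user in mind. Below is a minimal sketch of a software-style PMU written against the reworked interface (->event_init(), ->add()/->del(), ->start()/->stop() with the PERF_EF_* flags, and the PERF_HES_* state bits). All dummy_* names and the counter-read stub are invented for the example; the transaction hooks are omitted since they are optional and only matter when a PMU has to validate a whole group at once.

#include <linux/module.h>
#include <linux/perf_event.h>

static u64 dummy_read_counter(void)
{
        return 0;       /* stand-in for reading a real hardware counter */
}

static void dummy_update(struct perf_event *event)
{
        u64 prev = local64_read(&event->hw.prev_count);
        u64 now = dummy_read_counter();

        local64_set(&event->hw.prev_count, now);
        local64_add(now - prev, &event->count);
}

/* ->event_init() must return -ENOENT for events that belong to another PMU */
static int dummy_event_init(struct perf_event *event)
{
        if (event->attr.type != PERF_TYPE_RAW)
                return -ENOENT;
        return 0;
}

static void dummy_start(struct perf_event *event, int flags)
{
        if (flags & PERF_EF_RELOAD)
                local64_set(&event->hw.prev_count, dummy_read_counter());
        event->hw.state = 0;    /* counting again */
}

static void dummy_stop(struct perf_event *event, int flags)
{
        event->hw.state |= PERF_HES_STOPPED;
        if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
                dummy_update(event);
                event->hw.state |= PERF_HES_UPTODATE;
        }
}

/* ->add() leaves the event stopped unless PERF_EF_START is passed */
static int dummy_add(struct perf_event *event, int flags)
{
        event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        if (flags & PERF_EF_START)
                dummy_start(event, PERF_EF_RELOAD);
        return 0;
}

static void dummy_del(struct perf_event *event, int flags)
{
        dummy_stop(event, PERF_EF_UPDATE);
}

static void dummy_read(struct perf_event *event)
{
        dummy_update(event);
}

static struct pmu dummy_pmu = {
        .event_init     = dummy_event_init,
        .add            = dummy_add,
        .del            = dummy_del,
        .start          = dummy_start,
        .stop           = dummy_stop,
        .read           = dummy_read,
};

static int __init dummy_pmu_module_init(void)
{
        return perf_pmu_register(&dummy_pmu);
}

static void __exit dummy_pmu_module_exit(void)
{
        perf_pmu_unregister(&dummy_pmu);
}

module_init(dummy_pmu_module_init);
module_exit(dummy_pmu_module_exit);
MODULE_LICENSE("GPL");

With this shape the core calls ->add()/->del() when the event is scheduled onto or off the PMU, and ->start()/->stop() from the PMI and throttling paths, as the comments in the struct above describe.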

/**
@@ -669,7 +710,7 @@ struct perf_event {
int nr_siblings;
int group_flags;
struct perf_event *group_leader;
const struct pmu *pmu;
struct pmu *pmu;

enum perf_event_active_state state;
unsigned int attach_state;
@@ -763,12 +804,19 @@ struct perf_event {
#endif /* CONFIG_PERF_EVENTS */
};

enum perf_event_context_type {
task_context,
cpu_context,
};

/**
* struct perf_event_context - event context structure
*
* Used as a container for task events and CPU events as well:
*/
struct perf_event_context {
enum perf_event_context_type type;
struct pmu *pmu;
/*
* Protect the states of the events in the list,
* nr_active, and the list:
@@ -808,6 +856,12 @@ struct perf_event_context {
struct rcu_head rcu_head;
};

/*
* Number of contexts where an event can trigger:
* task, softirq, hardirq, nmi.
*/
#define PERF_NR_CONTEXTS 4

/**
* struct perf_event_cpu_context - per cpu event context structure
*/
@@ -815,18 +869,9 @@ struct perf_cpu_context {
struct perf_event_context ctx;
struct perf_event_context *task_ctx;
int active_oncpu;
int max_pertask;
int exclusive;
struct swevent_hlist *swevent_hlist;
struct mutex hlist_mutex;
int hlist_refcount;

/*
* Recursion avoidance:
*
* task, softirq, irq, nmi context
*/
int recursion[4];
struct list_head rotation_list;
int jiffies_interval;
};

struct perf_output_handle {
@@ -842,28 +887,22 @@ struct perf_output_handle {

#ifdef CONFIG_PERF_EVENTS

/*
* Set by architecture code:
*/
extern int perf_max_events;

extern const struct pmu *hw_perf_event_init(struct perf_event *event);
extern int perf_pmu_register(struct pmu *pmu);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void perf_event_task_sched_in(struct task_struct *task);
extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
extern void perf_event_task_tick(struct task_struct *task);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void set_perf_event_pending(void);
extern void perf_event_do_pending(void);
extern void perf_event_print_debug(void);
extern void __perf_disable(void);
extern bool __perf_enable(void);
extern void perf_disable(void);
extern void perf_enable(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern void perf_event_update_userpage(struct perf_event *event);
@@ -871,7 +910,7 @@ extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
int cpu,
pid_t pid,
struct task_struct *task,
perf_overflow_handler_t callback);
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
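Aside (illustrative, not from this patch): the hunk above changes perf_event_create_kernel_counter() to take a target task instead of a pid. A hedged sketch of a caller using the new signature for a CPU-bound counter follows; the attribute values and the sample_overflow() handler are made up, and in this kernel the overflow handler still carries the nmi flag visible in the next hunk.

#include <linux/perf_event.h>
#include <linux/err.h>

/* made-up overflow callback, matching the perf_overflow_handler_t of this era */
static void sample_overflow(struct perf_event *event, int nmi,
                            struct perf_sample_data *data,
                            struct pt_regs *regs)
{
        /* react here each time the counter crosses its sample_period */
}

static struct perf_event *create_cycle_counter(int cpu)
{
        struct perf_event_attr attr = {
                .type           = PERF_TYPE_HARDWARE,
                .config         = PERF_COUNT_HW_CPU_CYCLES,
                .size           = sizeof(attr),
                .sample_period  = 1000000,
        };

        /* cpu-bound counter, so no target task is passed */
        return perf_event_create_kernel_counter(&attr, cpu, NULL, sample_overflow);
}

The returned pointer should be checked with IS_ERR(), and the counter eventually freed with perf_event_release_kernel() from the hunk header above.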
@@ -922,14 +961,7 @@ extern int perf_event_overflow(struct perf_event *event, int nmi,
*/
static inline int is_software_event(struct perf_event *event)
{
switch (event->attr.type) {
case PERF_TYPE_SOFTWARE:
case PERF_TYPE_TRACEPOINT:
/* for now the breakpoint stuff also works as software event */
case PERF_TYPE_BREAKPOINT:
return 1;
}
return 0;
return event->pmu->task_ctx_nr == perf_sw_context;
}

extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
@@ -978,7 +1010,21 @@ extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks
extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry,
struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
struct pt_regs *regs);


static inline void
perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
if (entry->nr < PERF_MAX_STACK_DEPTH)
entry->ip[entry->nr++] = ip;
}
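Aside (illustrative, not from this patch): perf_callchain_store() above is the helper each architecture's callchain hooks call for every address they resolve. A rough sketch of such a hook follows; struct frame_record and start_frame() are made-up stand-ins for the architecture's real unwinder (for example dump_trace() on x86 or the fp walk on ARM).

#include <linux/perf_event.h>
#include <linux/ptrace.h>

/* hypothetical frame layout; real ports use their own unwinder state */
struct frame_record {
        struct frame_record *next;      /* saved frame pointer of the caller */
        unsigned long ret;              /* saved return address */
};

static struct frame_record *start_frame(struct pt_regs *regs)
{
        /* stub: a real port would derive this from the saved frame pointer */
        return NULL;
}

void perf_callchain_kernel(struct perf_callchain_entry *entry,
                           struct pt_regs *regs)
{
        struct frame_record *frame;

        /* record the interrupted instruction first */
        perf_callchain_store(entry, instruction_pointer(regs));

        /* then every caller until the chain ends or the entry array is full */
        for (frame = start_frame(regs);
             frame && entry->nr < PERF_MAX_STACK_DEPTH;
             frame = frame->next)
                perf_callchain_store(entry, frame->ret);
}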

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
@@ -1021,21 +1067,19 @@ extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task) { }
static inline void
perf_event_task_sched_out(struct task_struct *task,
struct task_struct *next) { }
static inline void
perf_event_task_tick(struct task_struct *task) { }
static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
static inline void perf_event_do_pending(void) { }
static inline void perf_event_print_debug(void) { }
static inline void perf_disable(void) { }
static inline void perf_enable(void) { }
static inline int perf_event_task_disable(void) { return -EINVAL; }
static inline int perf_event_task_enable(void) { return -EINVAL; }

@@ -1058,6 +1102,7 @@ static inline int perf_swevent_get_recursion_context(void) { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx) { }
static inline void perf_event_enable(struct perf_event *event) { }
static inline void perf_event_disable(struct perf_event *event) { }
static inline void perf_event_task_tick(void) { }
#endif

#define perf_output_put(handle, x) \