Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (35 commits)
  perf: Fix unexported generic perf_arch_fetch_caller_regs
  perf record: Don't try to find buildids in a zero sized file
  perf: export perf_trace_regs and perf_arch_fetch_caller_regs
  perf, x86: Fix hw_perf_enable() event assignment
  perf, ppc: Fix compile error due to new cpu notifiers
  perf: Make the install relative to DESTDIR if specified
  kprobes: Calculate the index correctly when freeing the out-of-line execution slot
  perf tools: Fix sparse CPU numbering related bugs
  perf_event: Fix oops triggered by cpu offline/online
  perf: Drop the obsolete profile naming for trace events
  perf: Take a hot regs snapshot for trace events
  perf: Introduce new perf_fetch_caller_regs() for hot regs snapshot
  perf/x86-64: Use frame pointer to walk on irq and process stacks
  lockdep: Move lock events under lockdep recursion protection
  perf report: Print the map table just after samples for which no map was found
  perf report: Add multiple event support
  perf session: Change perf_session post processing functions to take histogram tree
  perf session: Add storage for seperating event types in report
  perf session: Change add_hist_entry to take the tree root instead of session
  perf record: Add ID and to recorded event data when recording multiple events
  ...
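[Editorial note] The recurring change in the x86 diff below is a callback-signature cleanup: x86_pmu hooks such as enable/disable and the set_period/update helpers no longer take a (struct hw_perf_event *hwc, int idx) pair; they take the struct perf_event * alone and read the counter index from event->hw.idx. The sketch that follows is not the kernel's code, only a minimal, self-contained illustration of that before/after shape; the stand-in types and function names are invented for the example.

/* Hypothetical stand-in types, not the kernel's definitions. */
struct hw_perf_event {
	int idx;                /* hardware counter index */
	unsigned long config;
	unsigned long config_base;
};

struct perf_event {
	struct hw_perf_event hw;
};

/* Old shape: the counter index travels as a separate argument. */
static void enable_event_old(struct hw_perf_event *hwc, int idx)
{
	/* would program counter 'idx' from hwc->config here */
	(void)hwc;
	(void)idx;
}

/* New shape: everything is derived from the event itself. */
static void enable_event_new(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;     /* index is carried in the event */

	/* would program counter 'idx' from hwc->config here */
	(void)idx;
}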
@@ -133,8 +133,8 @@ struct x86_pmu {
int (*handle_irq)(struct pt_regs *);
void (*disable_all)(void);
void (*enable_all)(void);
void (*enable)(struct hw_perf_event *, int);
void (*disable)(struct hw_perf_event *, int);
void (*enable)(struct perf_event *);
void (*disable)(struct perf_event *);
unsigned eventsel;
unsigned perfctr;
u64 (*event_map)(int);
@@ -157,6 +157,11 @@ struct x86_pmu {
void (*put_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
struct event_constraint *event_constraints;

void (*cpu_prepare)(int cpu);
void (*cpu_starting)(int cpu);
void (*cpu_dying)(int cpu);
void (*cpu_dead)(int cpu);
};

static struct x86_pmu x86_pmu __read_mostly;
@@ -165,8 +170,7 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.enabled = 1,
};

static int x86_perf_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc, int idx);
static int x86_perf_event_set_period(struct perf_event *event);

/*
* Generalized hw caching related hw_event table, filled
@@ -189,11 +193,12 @@ static u64 __read_mostly hw_cache_event_ids
* Returns the delta events processed.
*/
static u64
x86_perf_event_update(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
x86_perf_event_update(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int shift = 64 - x86_pmu.event_bits;
u64 prev_raw_count, new_raw_count;
int idx = hwc->idx;
s64 delta;

if (idx == X86_PMC_IDX_FIXED_BTS)
@@ -293,7 +298,7 @@ static inline bool bts_available(void)
return x86_pmu.enable_bts != NULL;
}

static inline void init_debug_store_on_cpu(int cpu)
static void init_debug_store_on_cpu(int cpu)
{
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

@@ -305,7 +310,7 @@ static inline void init_debug_store_on_cpu(int cpu)
(u32)((u64)(unsigned long)ds >> 32));
}

static inline void fini_debug_store_on_cpu(int cpu)
static void fini_debug_store_on_cpu(int cpu)
{
if (!per_cpu(cpu_hw_events, cpu).ds)
return;
@@ -638,7 +643,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
if (test_bit(hwc->idx, used_mask))
break;

set_bit(hwc->idx, used_mask);
__set_bit(hwc->idx, used_mask);
if (assign)
assign[i] = hwc->idx;
}
@@ -687,7 +692,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
if (j == X86_PMC_IDX_MAX)
break;

set_bit(j, used_mask);
__set_bit(j, used_mask);

if (assign)
assign[i] = j;
@@ -780,6 +785,7 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc,
hwc->last_tag == cpuc->tags[i];
}

static int x86_pmu_start(struct perf_event *event);
static void x86_pmu_stop(struct perf_event *event);

void hw_perf_enable(void)
@@ -796,6 +802,7 @@ void hw_perf_enable(void)
return;

if (cpuc->n_added) {
int n_running = cpuc->n_events - cpuc->n_added;
/*
* apply assignment obtained either from
* hw_perf_group_sched_in() or x86_pmu_enable()
@@ -803,8 +810,7 @@ void hw_perf_enable(void)
* step1: save events moving to new counters
* step2: reprogram moved events into new counters
*/
for (i = 0; i < cpuc->n_events; i++) {

for (i = 0; i < n_running; i++) {
event = cpuc->event_list[i];
hwc = &event->hw;

@@ -819,29 +825,18 @@ void hw_perf_enable(void)
continue;

x86_pmu_stop(event);

hwc->idx = -1;
}

for (i = 0; i < cpuc->n_events; i++) {

event = cpuc->event_list[i];
hwc = &event->hw;

if (hwc->idx == -1) {
if (!match_prev_assignment(hwc, cpuc, i))
x86_assign_hw_event(event, cpuc, i);
x86_perf_event_set_period(event, hwc, hwc->idx);
}
/*
* need to mark as active because x86_pmu_disable()
* clear active_mask and events[] yet it preserves
* idx
*/
set_bit(hwc->idx, cpuc->active_mask);
cpuc->events[hwc->idx] = event;
else if (i < n_running)
continue;

x86_pmu.enable(hwc, hwc->idx);
perf_event_update_userpage(event);
x86_pmu_start(event);
}
cpuc->n_added = 0;
perf_events_lapic_init();
@@ -853,15 +848,16 @@ void hw_perf_enable(void)
x86_pmu.enable_all();
}

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
{
(void)checking_wrmsrl(hwc->config_base + idx,
(void)checking_wrmsrl(hwc->config_base + hwc->idx,
hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
}

static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
static inline void x86_pmu_disable_event(struct perf_event *event)
{
(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
struct hw_perf_event *hwc = &event->hw;
(void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -871,12 +867,12 @@ static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
* To be called with the event disabled in hw:
*/
static int
x86_perf_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
x86_perf_event_set_period(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
s64 left = atomic64_read(&hwc->period_left);
s64 period = hwc->sample_period;
int err, ret = 0;
int err, ret = 0, idx = hwc->idx;

if (idx == X86_PMC_IDX_FIXED_BTS)
return 0;
@@ -922,11 +918,11 @@ x86_perf_event_set_period(struct perf_event *event,
return ret;
}

static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
static void x86_pmu_enable_event(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (cpuc->enabled)
__x86_pmu_enable_event(hwc, idx);
__x86_pmu_enable_event(&event->hw);
}

/*
@@ -962,34 +958,32 @@ static int x86_pmu_enable(struct perf_event *event)
memcpy(cpuc->assign, assign, n*sizeof(int));

cpuc->n_events = n;
cpuc->n_added = n - n0;
cpuc->n_added += n - n0;

return 0;
}

static int x86_pmu_start(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx = event->hw.idx;

if (hwc->idx == -1)
if (idx == -1)
return -EAGAIN;

x86_perf_event_set_period(event, hwc, hwc->idx);
x86_pmu.enable(hwc, hwc->idx);
x86_perf_event_set_period(event);
cpuc->events[idx] = event;
__set_bit(idx, cpuc->active_mask);
x86_pmu.enable(event);
perf_event_update_userpage(event);

return 0;
}

static void x86_pmu_unthrottle(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;

if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
cpuc->events[hwc->idx] != event))
return;

x86_pmu.enable(hwc, hwc->idx);
int ret = x86_pmu_start(event);
WARN_ON_ONCE(ret);
}

void perf_event_print_debug(void)
@@ -1049,18 +1043,16 @@ static void x86_pmu_stop(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;

/*
* Must be done before we disable, otherwise the nmi handler
* could reenable again:
*/
clear_bit(idx, cpuc->active_mask);
x86_pmu.disable(hwc, idx);
if (!__test_and_clear_bit(idx, cpuc->active_mask))
return;

x86_pmu.disable(event);

/*
* Drain the remaining delta count out of a event
* that we are disabling:
*/
x86_perf_event_update(event, hwc, idx);
x86_perf_event_update(event);

cpuc->events[idx] = NULL;
}
@@ -1108,7 +1100,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
event = cpuc->events[idx];
hwc = &event->hw;

val = x86_perf_event_update(event, hwc, idx);
val = x86_perf_event_update(event);
if (val & (1ULL << (x86_pmu.event_bits - 1)))
continue;

@@ -1118,11 +1110,11 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
handled = 1;
data.period = event->hw.last_period;

if (!x86_perf_event_set_period(event, hwc, idx))
if (!x86_perf_event_set_period(event))
continue;

if (perf_event_overflow(event, 1, &data, regs))
x86_pmu.disable(hwc, idx);
x86_pmu_stop(event);
}

if (handled)
@@ -1309,7 +1301,7 @@ int hw_perf_group_sched_in(struct perf_event *leader,
memcpy(cpuc->assign, assign, n0*sizeof(int));

cpuc->n_events = n0;
cpuc->n_added = n1;
cpuc->n_added += n1;
ctx->nr_active += n1;

/*
@@ -1337,6 +1329,39 @@ undo:
#include "perf_event_p6.c"
#include "perf_event_intel.c"

static int __cpuinit
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;

switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
if (x86_pmu.cpu_prepare)
x86_pmu.cpu_prepare(cpu);
break;

case CPU_STARTING:
if (x86_pmu.cpu_starting)
x86_pmu.cpu_starting(cpu);
break;

case CPU_DYING:
if (x86_pmu.cpu_dying)
x86_pmu.cpu_dying(cpu);
break;

case CPU_DEAD:
if (x86_pmu.cpu_dead)
x86_pmu.cpu_dead(cpu);
break;

default:
break;
}

return NOTIFY_OK;
}

static void __init pmu_check_apic(void)
{
if (cpu_has_apic)
@@ -1415,11 +1440,13 @@ void __init init_hw_perf_events(void)
pr_info("... max period: %016Lx\n", x86_pmu.max_period);
pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
pr_info("... event mask: %016Lx\n", perf_event_mask);

perf_cpu_notifier(x86_pmu_notifier);
}

static inline void x86_pmu_read(struct perf_event *event)
{
x86_perf_event_update(event, &event->hw, event->hw.idx);
x86_perf_event_update(event);
}

static const struct pmu pmu = {
@@ -1675,28 +1702,16 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
return entry;
}

void hw_perf_event_setup_online(int cpu)
#ifdef CONFIG_EVENT_TRACING
void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
{
init_debug_store_on_cpu(cpu);

switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
amd_pmu_cpu_online(cpu);
break;
default:
return;
}
}

void hw_perf_event_setup_offline(int cpu)
{
init_debug_store_on_cpu(cpu);

switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
amd_pmu_cpu_offline(cpu);
break;
default:
return;
}
regs->ip = ip;
/*
* perf_arch_fetch_caller_regs adds another call, we need to increment
* the skip level
*/
regs->bp = rewind_frame_pointer(skip + 1);
regs->cs = __KERNEL_CS;
local_save_flags(regs->flags);
}
#endif

@@ -271,28 +271,6 @@ done:
return &emptyconstraint;
}

static __initconst struct x86_pmu amd_pmu = {
.name = "AMD",
.handle_irq = x86_pmu_handle_irq,
.disable_all = x86_pmu_disable_all,
.enable_all = x86_pmu_enable_all,
.enable = x86_pmu_enable_event,
.disable = x86_pmu_disable_event,
.eventsel = MSR_K7_EVNTSEL0,
.perfctr = MSR_K7_PERFCTR0,
.event_map = amd_pmu_event_map,
.raw_event = amd_pmu_raw_event,
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
.num_events = 4,
.event_bits = 48,
.event_mask = (1ULL << 48) - 1,
.apic = 1,
/* use highest bit to detect overflow */
.max_period = (1ULL << 47) - 1,
.get_event_constraints = amd_get_event_constraints,
.put_event_constraints = amd_put_event_constraints
};

static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
{
struct amd_nb *nb;
@@ -309,7 +287,7 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
* initialize all possible NB constraints
*/
for (i = 0; i < x86_pmu.num_events; i++) {
set_bit(i, nb->event_constraints[i].idxmsk);
__set_bit(i, nb->event_constraints[i].idxmsk);
nb->event_constraints[i].weight = 1;
}
return nb;
@@ -378,6 +356,31 @@ static void amd_pmu_cpu_offline(int cpu)
raw_spin_unlock(&amd_nb_lock);
}

static __initconst struct x86_pmu amd_pmu = {
.name = "AMD",
.handle_irq = x86_pmu_handle_irq,
.disable_all = x86_pmu_disable_all,
.enable_all = x86_pmu_enable_all,
.enable = x86_pmu_enable_event,
.disable = x86_pmu_disable_event,
.eventsel = MSR_K7_EVNTSEL0,
.perfctr = MSR_K7_PERFCTR0,
.event_map = amd_pmu_event_map,
.raw_event = amd_pmu_raw_event,
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
.num_events = 4,
.event_bits = 48,
.event_mask = (1ULL << 48) - 1,
.apic = 1,
/* use highest bit to detect overflow */
.max_period = (1ULL << 47) - 1,
.get_event_constraints = amd_get_event_constraints,
.put_event_constraints = amd_put_event_constraints,

.cpu_prepare = amd_pmu_cpu_online,
.cpu_dead = amd_pmu_cpu_offline,
};

static __init int amd_pmu_init(void)
{
/* Performance-monitoring supported from K7 and later: */
@@ -390,11 +393,6 @@ static __init int amd_pmu_init(void)
memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
sizeof(hw_cache_event_ids));

/*
* explicitly initialize the boot cpu, other cpus will get
* the cpu hotplug callbacks from smp_init()
*/
amd_pmu_cpu_online(smp_processor_id());
return 0;
}

@@ -405,12 +403,4 @@ static int amd_pmu_init(void)
return 0;
}

static void amd_pmu_cpu_online(int cpu)
{
}

static void amd_pmu_cpu_offline(int cpu)
{
}

#endif

@@ -548,9 +548,9 @@ static inline void intel_pmu_ack_status(u64 ack)
}

static inline void
intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
int idx = __idx - X86_PMC_IDX_FIXED;
int idx = hwc->idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, mask;

mask = 0xfULL << (idx * 4);
@@ -621,26 +621,28 @@ static void intel_pmu_drain_bts_buffer(void)
}

static inline void
intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
intel_pmu_disable_event(struct perf_event *event)
{
if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
struct hw_perf_event *hwc = &event->hw;

if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
intel_pmu_disable_bts();
intel_pmu_drain_bts_buffer();
return;
}

if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
intel_pmu_disable_fixed(hwc, idx);
intel_pmu_disable_fixed(hwc);
return;
}

x86_pmu_disable_event(hwc, idx);
x86_pmu_disable_event(event);
}

static inline void
intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
int idx = __idx - X86_PMC_IDX_FIXED;
int idx = hwc->idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, bits, mask;
int err;

@@ -670,9 +672,11 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
static void intel_pmu_enable_event(struct perf_event *event)
{
if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
struct hw_perf_event *hwc = &event->hw;

if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
if (!__get_cpu_var(cpu_hw_events).enabled)
return;

@@ -681,11 +685,11 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
}

if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
intel_pmu_enable_fixed(hwc, idx);
intel_pmu_enable_fixed(hwc);
return;
}

__x86_pmu_enable_event(hwc, idx);
__x86_pmu_enable_event(hwc);
}

/*
@@ -694,14 +698,8 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
*/
static int intel_pmu_save_and_restart(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
int ret;

x86_perf_event_update(event, hwc, idx);
ret = x86_perf_event_set_period(event, hwc, idx);

return ret;
x86_perf_event_update(event);
return x86_perf_event_set_period(event);
}

static void intel_pmu_reset(void)
@@ -745,11 +743,11 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)

cpuc = &__get_cpu_var(cpu_hw_events);

perf_disable();
intel_pmu_disable_all();
intel_pmu_drain_bts_buffer();
status = intel_pmu_get_status();
if (!status) {
perf_enable();
intel_pmu_enable_all();
return 0;
}

@@ -759,8 +757,7 @@ again:
WARN_ONCE(1, "perfevents: irq loop stuck!\n");
perf_event_print_debug();
intel_pmu_reset();
perf_enable();
return 1;
goto done;
}

inc_irq_stat(apic_perf_irqs);
@@ -768,7 +765,6 @@ again:
for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit];

clear_bit(bit, (unsigned long *) &status);
if (!test_bit(bit, cpuc->active_mask))
continue;

@@ -778,7 +774,7 @@ again:
data.period = event->hw.last_period;

if (perf_event_overflow(event, 1, &data, regs))
intel_pmu_disable_event(&event->hw, bit);
x86_pmu_stop(event);
}

intel_pmu_ack_status(ack);
@@ -790,8 +786,8 @@ again:
if (status)
goto again;

perf_enable();

done:
intel_pmu_enable_all();
return 1;
}

@@ -870,7 +866,10 @@ static __initconst struct x86_pmu intel_pmu = {
.max_period = (1ULL << 31) - 1,
.enable_bts = intel_pmu_enable_bts,
.disable_bts = intel_pmu_disable_bts,
.get_event_constraints = intel_get_event_constraints
.get_event_constraints = intel_get_event_constraints,

.cpu_starting = init_debug_store_on_cpu,
.cpu_dying = fini_debug_store_on_cpu,
};

static __init int intel_pmu_init(void)

@@ -77,27 +77,29 @@ static void p6_pmu_enable_all(void)
}

static inline void
p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
p6_pmu_disable_event(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
u64 val = P6_NOP_EVENT;

if (cpuc->enabled)
val |= ARCH_PERFMON_EVENTSEL_ENABLE;

(void)checking_wrmsrl(hwc->config_base + idx, val);
(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
}

static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
static void p6_pmu_enable_event(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
u64 val;

val = hwc->config;
if (cpuc->enabled)
val |= ARCH_PERFMON_EVENTSEL_ENABLE;

(void)checking_wrmsrl(hwc->config_base + idx, val);
(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
}

static __initconst struct x86_pmu p6_pmu = {
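
[Editorial note] The other structural addition visible above is a set of optional per-PMU CPU hotplug hooks (cpu_prepare, cpu_starting, cpu_dying, cpu_dead) dispatched from a single notifier, with each back end filling in only the hooks it needs. The program below is not kernel code; it is a small, self-contained illustration of that optional-callback-table pattern, with invented names.

#include <stdio.h>

struct pmu_ops {
	void (*cpu_prepare)(int cpu);   /* may be NULL if the PMU doesn't care */
	void (*cpu_starting)(int cpu);
	void (*cpu_dying)(int cpu);
	void (*cpu_dead)(int cpu);
};

enum cpu_action { CPU_UP_PREPARE, CPU_STARTING, CPU_DYING, CPU_DEAD };

/* One notifier; each hook is invoked only when the PMU provides it. */
static void pmu_cpu_notify(const struct pmu_ops *ops, enum cpu_action action, int cpu)
{
	switch (action) {
	case CPU_UP_PREPARE:
		if (ops->cpu_prepare)
			ops->cpu_prepare(cpu);
		break;
	case CPU_STARTING:
		if (ops->cpu_starting)
			ops->cpu_starting(cpu);
		break;
	case CPU_DYING:
		if (ops->cpu_dying)
			ops->cpu_dying(cpu);
		break;
	case CPU_DEAD:
		if (ops->cpu_dead)
			ops->cpu_dead(cpu);
		break;
	}
}

static void example_prepare(int cpu) { printf("prepare cpu %d\n", cpu); }

int main(void)
{
	/* A back end that only cares about the prepare step. */
	struct pmu_ops example_pmu = { .cpu_prepare = example_prepare };

	pmu_cpu_notify(&example_pmu, CPU_UP_PREPARE, 0);
	pmu_cpu_notify(&example_pmu, CPU_DEAD, 0);  /* NULL hook: silently skipped */
	return 0;
}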