Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (121 commits)
  perf symbols: Increase symbol KSYM_NAME_LEN size
  perf hists browser: Refuse 'a' hotkey on non symbolic views
  perf ui browser: Use libslang to read keys
  perf tools: Fix tracing info recording
  perf hists browser: Elide DSO column when it is set to just one DSO, ditto for threads
  perf hists: Don't consider filtered entries when calculating column widths
  perf hists: Don't decay total_period for filtered entries
  perf hists browser: Honour symbol_conf.show_{nr_samples,total_period}
  perf hists browser: Do not exit on tab key with single event
  perf annotate browser: Don't change selection line when returning from callq
  perf tools: handle endianness of feature bitmap
  perf tools: Add prelink suggestion to dso update message
  perf script: Fix unknown feature comment
  perf hists browser: Apply the dso and thread filters when merging new batches
  perf hists: Move the dso and thread filters from hist_browser
  perf ui browser: Honour the xterm colors
  perf top tui: Give color hints just on the percentage, like on --stdio
  perf ui browser: Make the colors configurable and change the defaults
  perf tui: Remove unneeded call to newtCls on startup
  perf hists: Don't format the percentage on hist_entry__snprintf
  ...

Fix up conflicts in arch/x86/kernel/kprobes.c manually.

Ingo's tree did the insane "add volatile to const array", which just
doesn't make sense ("volatile const"?).  But we could remove the const
*and* make the array volatile to make doubly sure that gcc doesn't
optimize it away.
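
As a rough, hypothetical illustration of the two qualifier combinations being
discussed (made-up table names, not the actual kprobes array):

/* "volatile const": legal C, but an odd combination for a lookup table the
 * code never writes; reads may not be cached, yet writes stay forbidden. */
static volatile const unsigned long table_a[4] = { 1, 2, 3, 4 };

/* The alternative mentioned above: drop const and keep volatile, so every
 * access really goes to memory and gcc cannot optimize the array away. */
static volatile unsigned long table_b[4] = { 1, 2, 3, 4 };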

Also fix up kernel/trace/ring_buffer.c non-data-conflicts manually: the
reader_lock has been turned into a raw lock by the core locking merge,
and there was a new user of it introduced in this perf core merge.  Make
sure that new use also uses the raw accessor functions.
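
For reference, a minimal sketch of the raw-accessor pattern in general
(illustrative only, with made-up names, not the actual ring_buffer.c change):

#include <linux/spinlock.h>

static raw_spinlock_t example_reader_lock =
	__RAW_SPIN_LOCK_UNLOCKED(example_reader_lock);

static void example_read_section(void)
{
	unsigned long flags;

	/* A raw_spinlock_t must be taken with the raw_* accessors; the
	 * plain spin_lock_irqsave() helpers expect a spinlock_t and will
	 * not accept it. */
	raw_spin_lock_irqsave(&example_reader_lock, flags);
	/* ... critical section ... */
	raw_spin_unlock_irqrestore(&example_reader_lock, flags);
}
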
Linus Torvalds, 2011-10-26 17:03:38 +02:00
117 changed files with 6213 additions and 2884 deletions


@@ -19,7 +19,7 @@ endif
obj-y := process_$(BITS).o signal.o entry_$(BITS).o
obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
obj-y += time.o ioport.o ldt.o dumpstack.o
obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-y += probe_roms.o


@@ -60,22 +60,10 @@ void arch_trigger_all_cpu_backtrace(void)
}
static int __kprobes
arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
{
struct die_args *args = __args;
struct pt_regs *regs;
int cpu;
switch (cmd) {
case DIE_NMI:
break;
default:
return NOTIFY_DONE;
}
regs = args->regs;
cpu = smp_processor_id();
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
@@ -86,21 +74,16 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
show_regs(regs);
arch_spin_unlock(&lock);
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
return NOTIFY_STOP;
return NMI_HANDLED;
}
return NOTIFY_DONE;
return NMI_DONE;
}
static __read_mostly struct notifier_block backtrace_notifier = {
.notifier_call = arch_trigger_all_cpu_backtrace_handler,
.next = NULL,
.priority = NMI_LOCAL_LOW_PRIOR,
};
static int __init register_trigger_all_cpu_backtrace(void)
{
register_die_notifier(&backtrace_notifier);
register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler,
0, "arch_bt");
return 0;
}
early_initcall(register_trigger_all_cpu_backtrace);
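
(For orientation, the registration pattern these hunks convert to looks
roughly like the sketch below. The handler body, the atomic flag and the
"example" name are hypothetical; only register_nmi_handler(), NMI_HANDLED and
NMI_DONE come from the hunks themselves.)

#include <linux/atomic.h>
#include <linux/init.h>
#include <asm/nmi.h>

static atomic_t example_pending;

static int example_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	/* Claim the NMI only if this handler actually raised it. */
	if (!atomic_xchg(&example_pending, 0))
		return NMI_DONE;	/* not ours: let other NMI handlers run */

	/* ... do the work, e.g. dump state from regs ... */
	return NMI_HANDLED;		/* consumed: avoids the unknown-NMI path */
}

static int __init example_nmi_init(void)
{
	register_nmi_handler(NMI_LOCAL, example_nmi_handler, 0, "example");
	return 0;
}
early_initcall(example_nmi_init);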


@@ -672,18 +672,11 @@ void __cpuinit uv_cpu_init(void)
/*
* When NMI is received, print a stack trace.
*/
int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
{
unsigned long real_uv_nmi;
int bid;
if (reason != DIE_NMIUNKNOWN)
return NOTIFY_OK;
if (in_crash_kexec)
/* do nothing if entering the crash kernel */
return NOTIFY_OK;
/*
* Each blade has an MMR that indicates when an NMI has been sent
* to cpus on the blade. If an NMI is detected, atomically
@@ -704,7 +697,7 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
}
if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
return NOTIFY_DONE;
return NMI_DONE;
__get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;
@@ -717,17 +710,12 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
dump_stack();
spin_unlock(&uv_nmi_lock);
return NOTIFY_STOP;
return NMI_HANDLED;
}
static struct notifier_block uv_dump_stack_nmi_nb = {
.notifier_call = uv_handle_nmi,
.priority = NMI_LOCAL_LOW_PRIOR - 1,
};
void uv_register_nmi_notifier(void)
{
if (register_die_notifier(&uv_dump_stack_nmi_nb))
if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
printk(KERN_WARNING "UV NMI handler failed to register\n");
}


@@ -28,10 +28,15 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
ifdef CONFIG_PERF_EVENTS
obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd.o
obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_p4.o perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
endif
obj-$(CONFIG_X86_MCE) += mcheck/
obj-$(CONFIG_MTRR) += mtrr/
obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o perf_event_amd_ibs.o
quiet_cmd_mkcapflags = MKCAP $@
cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@


@@ -78,27 +78,20 @@ static void raise_exception(struct mce *m, struct pt_regs *pregs)
static cpumask_var_t mce_inject_cpumask;
static int mce_raise_notify(struct notifier_block *self,
unsigned long val, void *data)
static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
{
struct die_args *args = (struct die_args *)data;
int cpu = smp_processor_id();
struct mce *m = &__get_cpu_var(injectm);
if (val != DIE_NMI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
return NOTIFY_DONE;
if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
return NMI_DONE;
cpumask_clear_cpu(cpu, mce_inject_cpumask);
if (m->inject_flags & MCJ_EXCEPTION)
raise_exception(m, args->regs);
raise_exception(m, regs);
else if (m->status)
raise_poll(m);
return NOTIFY_STOP;
return NMI_HANDLED;
}
static struct notifier_block mce_raise_nb = {
.notifier_call = mce_raise_notify,
.priority = NMI_LOCAL_NORMAL_PRIOR,
};
/* Inject mce on current CPU */
static int raise_local(void)
{
@@ -216,7 +209,8 @@ static int inject_init(void)
return -ENOMEM;
printk(KERN_INFO "Machine check injector initialized\n");
mce_chrdev_ops.write = mce_write;
register_die_notifier(&mce_raise_nb);
register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0,
"mce_notify");
return 0;
}


@@ -908,9 +908,6 @@ void do_machine_check(struct pt_regs *regs, long error_code)
percpu_inc(mce_exception_count);
if (notify_die(DIE_NMI, "machine check", regs, error_code,
18, SIGKILL) == NOTIFY_STOP)
goto out;
if (!banks)
goto out;
@@ -1140,6 +1137,15 @@ static void mce_start_timer(unsigned long data)
add_timer_on(t, smp_processor_id());
}
/* Must not be called in IRQ context where del_timer_sync() can deadlock */
static void mce_timer_delete_all(void)
{
int cpu;
for_each_online_cpu(cpu)
del_timer_sync(&per_cpu(mce_timer, cpu));
}
static void mce_do_trigger(struct work_struct *work)
{
call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
@@ -1750,7 +1756,6 @@ static struct syscore_ops mce_syscore_ops = {
static void mce_cpu_restart(void *data)
{
del_timer_sync(&__get_cpu_var(mce_timer));
if (!mce_available(__this_cpu_ptr(&cpu_info)))
return;
__mcheck_cpu_init_generic();
@@ -1760,16 +1765,15 @@ static void mce_cpu_restart(void *data)
/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
mce_timer_delete_all();
on_each_cpu(mce_cpu_restart, NULL, 1);
}
/* Toggle features for corrected errors */
static void mce_disable_ce(void *all)
static void mce_disable_cmci(void *data)
{
if (!mce_available(__this_cpu_ptr(&cpu_info)))
return;
if (all)
del_timer_sync(&__get_cpu_var(mce_timer));
cmci_clear();
}
@@ -1852,7 +1856,8 @@ static ssize_t set_ignore_ce(struct sys_device *s,
if (mce_ignore_ce ^ !!new) {
if (new) {
/* disable ce features */
on_each_cpu(mce_disable_ce, (void *)1, 1);
mce_timer_delete_all();
on_each_cpu(mce_disable_cmci, NULL, 1);
mce_ignore_ce = 1;
} else {
/* enable ce features */
@@ -1875,7 +1880,7 @@ static ssize_t set_cmci_disabled(struct sys_device *s,
if (mce_cmci_disabled ^ !!new) {
if (new) {
/* disable cmci */
on_each_cpu(mce_disable_ce, NULL, 1);
on_each_cpu(mce_disable_cmci, NULL, 1);
mce_cmci_disabled = 1;
} else {
/* enable cmci */


@@ -32,6 +32,8 @@
#include <asm/smp.h>
#include <asm/alternative.h>
#include "perf_event.h"
#if 0
#undef wrmsrl
#define wrmsrl(msr, val) \
@@ -43,283 +45,17 @@ do { \
} while (0)
#endif
/*
* | NHM/WSM | SNB |
* register -------------------------------
* | HT | no HT | HT | no HT |
*-----------------------------------------
* offcore | core | core | cpu | core |
* lbr_sel | core | core | cpu | core |
* ld_lat | cpu | core | cpu | core |
*-----------------------------------------
*
* Given that there is a small number of shared regs,
* we can pre-allocate their slot in the per-cpu
* per-core reg tables.
*/
enum extra_reg_type {
EXTRA_REG_NONE = -1, /* not used */
struct x86_pmu x86_pmu __read_mostly;
EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */
EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */
EXTRA_REG_MAX /* number of entries needed */
};
struct event_constraint {
union {
unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
u64 idxmsk64;
};
u64 code;
u64 cmask;
int weight;
};
struct amd_nb {
int nb_id; /* NorthBridge id */
int refcnt; /* reference count */
struct perf_event *owners[X86_PMC_IDX_MAX];
struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};
struct intel_percore;
#define MAX_LBR_ENTRIES 16
struct cpu_hw_events {
/*
* Generic x86 PMC bits
*/
struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
int enabled;
int n_events;
int n_added;
int n_txn;
int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
u64 tags[X86_PMC_IDX_MAX];
struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
unsigned int group_flag;
/*
* Intel DebugStore bits
*/
struct debug_store *ds;
u64 pebs_enabled;
/*
* Intel LBR bits
*/
int lbr_users;
void *lbr_context;
struct perf_branch_stack lbr_stack;
struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
/*
* manage shared (per-core, per-cpu) registers
* used on Intel NHM/WSM/SNB
*/
struct intel_shared_regs *shared_regs;
/*
* AMD specific bits
*/
struct amd_nb *amd_nb;
};
#define __EVENT_CONSTRAINT(c, n, m, w) {\
{ .idxmsk64 = (n) }, \
.code = (c), \
.cmask = (m), \
.weight = (w), \
}
#define EVENT_CONSTRAINT(c, n, m) \
__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
/*
* Constraint on the Event code.
*/
#define INTEL_EVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
/*
* Constraint on the Event code + UMask + fixed-mask
*
* filter mask to validate fixed counter events.
* the following filters disqualify for fixed counters:
* - inv
* - edge
* - cnt-mask
* The other filters are supported by fixed counters.
* The any-thread option is supported starting with v3.
*/
#define FIXED_EVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
/*
* Constraint on the Event code + UMask
*/
#define INTEL_UEVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
#define EVENT_CONSTRAINT_END \
EVENT_CONSTRAINT(0, 0, 0)
#define for_each_event_constraint(e, c) \
for ((e) = (c); (e)->weight; (e)++)
/*
* Per register state.
*/
struct er_account {
raw_spinlock_t lock; /* per-core: protect structure */
u64 config; /* extra MSR config */
u64 reg; /* extra MSR number */
atomic_t ref; /* reference count */
};
/*
* Extra registers for specific events.
*
* Some events need large masks and require external MSRs.
* Those extra MSRs end up being shared for all events on
* a PMU and sometimes between PMU of sibling HT threads.
* In either case, the kernel needs to handle conflicting
* accesses to those extra, shared, regs. The data structure
* to manage those registers is stored in cpu_hw_event.
*/
struct extra_reg {
unsigned int event;
unsigned int msr;
u64 config_mask;
u64 valid_mask;
int idx; /* per_xxx->regs[] reg index */
};
#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
.event = (e), \
.msr = (ms), \
.config_mask = (m), \
.valid_mask = (vm), \
.idx = EXTRA_REG_##i \
}
#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
union perf_capabilities {
struct {
u64 lbr_format : 6;
u64 pebs_trap : 1;
u64 pebs_arch_reg : 1;
u64 pebs_format : 4;
u64 smm_freeze : 1;
};
u64 capabilities;
};
/*
* struct x86_pmu - generic x86 pmu
*/
struct x86_pmu {
/*
* Generic x86 PMC bits
*/
const char *name;
int version;
int (*handle_irq)(struct pt_regs *);
void (*disable_all)(void);
void (*enable_all)(int added);
void (*enable)(struct perf_event *);
void (*disable)(struct perf_event *);
int (*hw_config)(struct perf_event *event);
int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
unsigned eventsel;
unsigned perfctr;
u64 (*event_map)(int);
int max_events;
int num_counters;
int num_counters_fixed;
int cntval_bits;
u64 cntval_mask;
int apic;
u64 max_period;
struct event_constraint *
(*get_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
void (*put_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
struct event_constraint *event_constraints;
void (*quirks)(void);
int perfctr_second_write;
int (*cpu_prepare)(int cpu);
void (*cpu_starting)(int cpu);
void (*cpu_dying)(int cpu);
void (*cpu_dead)(int cpu);
/*
* Intel Arch Perfmon v2+
*/
u64 intel_ctrl;
union perf_capabilities intel_cap;
/*
* Intel DebugStore bits
*/
int bts, pebs;
int bts_active, pebs_active;
int pebs_record_size;
void (*drain_pebs)(struct pt_regs *regs);
struct event_constraint *pebs_constraints;
/*
* Intel LBR
*/
unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
int lbr_nr; /* hardware stack size */
/*
* Extra registers for events
*/
struct extra_reg *extra_regs;
unsigned int er_flags;
};
#define ERF_NO_HT_SHARING 1
#define ERF_HAS_RSP_1 2
static struct x86_pmu x86_pmu __read_mostly;
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.enabled = 1,
};
static int x86_perf_event_set_period(struct perf_event *event);
/*
* Generalized hw caching related hw_event table, filled
* in on a per model basis. A value of 0 means
* 'not supported', -1 means 'hw_event makes no sense on
* this CPU', any other value means the raw hw_event
* ID.
*/
#define C(x) PERF_COUNT_HW_CACHE_##x
static u64 __read_mostly hw_cache_event_ids
u64 __read_mostly hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
static u64 __read_mostly hw_cache_extra_regs
u64 __read_mostly hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
@@ -329,8 +65,7 @@ static u64 __read_mostly hw_cache_extra_regs
* Can only be executed on the CPU where the event is active.
* Returns the delta events processed.
*/
static u64
x86_perf_event_update(struct perf_event *event)
u64 x86_perf_event_update(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int shift = 64 - x86_pmu.cntval_bits;
@@ -373,30 +108,6 @@ again:
return new_raw_count;
}
static inline int x86_pmu_addr_offset(int index)
{
int offset;
/* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
alternative_io(ASM_NOP2,
"shll $1, %%eax",
X86_FEATURE_PERFCTR_CORE,
"=a" (offset),
"a" (index));
return offset;
}
static inline unsigned int x86_pmu_config_addr(int index)
{
return x86_pmu.eventsel + x86_pmu_addr_offset(index);
}
static inline unsigned int x86_pmu_event_addr(int index)
{
return x86_pmu.perfctr + x86_pmu_addr_offset(index);
}
/*
* Find and validate any extra registers to set up.
*/
@@ -532,9 +243,6 @@ msr_fail:
return false;
}
static void reserve_ds_buffers(void);
static void release_ds_buffers(void);
static void hw_perf_event_destroy(struct perf_event *event)
{
if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
@@ -583,7 +291,7 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
return x86_pmu_extra_regs(val, event);
}
static int x86_setup_perfctr(struct perf_event *event)
int x86_setup_perfctr(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
@@ -647,7 +355,7 @@ static int x86_setup_perfctr(struct perf_event *event)
return 0;
}
static int x86_pmu_hw_config(struct perf_event *event)
int x86_pmu_hw_config(struct perf_event *event)
{
if (event->attr.precise_ip) {
int precise = 0;
@@ -723,7 +431,7 @@ static int __x86_pmu_event_init(struct perf_event *event)
return x86_pmu.hw_config(event);
}
static void x86_pmu_disable_all(void)
void x86_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
@@ -758,15 +466,7 @@ static void x86_pmu_disable(struct pmu *pmu)
x86_pmu.disable_all();
}
static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
u64 enable_mask)
{
if (hwc->extra_reg.reg)
wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
wrmsrl(hwc->config_base, hwc->config | enable_mask);
}
static void x86_pmu_enable_all(int added)
void x86_pmu_enable_all(int added)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
@@ -788,7 +488,7 @@ static inline int is_x86_event(struct perf_event *event)
return event->pmu == &pmu;
}
static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
@@ -959,7 +659,6 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc,
}
static void x86_pmu_start(struct perf_event *event, int flags);
static void x86_pmu_stop(struct perf_event *event, int flags);
static void x86_pmu_enable(struct pmu *pmu)
{
@@ -1031,21 +730,13 @@ static void x86_pmu_enable(struct pmu *pmu)
x86_pmu.enable_all(added);
}
static inline void x86_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
wrmsrl(hwc->config_base, hwc->config);
}
static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
/*
* Set the next IRQ period, based on the hwc->period_left value.
* To be called with the event disabled in hw:
*/
static int
x86_perf_event_set_period(struct perf_event *event)
int x86_perf_event_set_period(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
s64 left = local64_read(&hwc->period_left);
@@ -1105,7 +796,7 @@ x86_perf_event_set_period(struct perf_event *event)
return ret;
}
static void x86_pmu_enable_event(struct perf_event *event)
void x86_pmu_enable_event(struct perf_event *event)
{
if (__this_cpu_read(cpu_hw_events.enabled))
__x86_pmu_enable_event(&event->hw,
@@ -1244,7 +935,7 @@ void perf_event_print_debug(void)
local_irq_restore(flags);
}
static void x86_pmu_stop(struct perf_event *event, int flags)
void x86_pmu_stop(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
@@ -1297,7 +988,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
perf_event_update_userpage(event);
}
static int x86_pmu_handle_irq(struct pt_regs *regs)
int x86_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
@@ -1367,109 +1058,28 @@ void perf_events_lapic_init(void)
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
struct pmu_nmi_state {
unsigned int marked;
int handled;
};
static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
struct die_args *args = __args;
unsigned int this_nmi;
int handled;
if (!atomic_read(&active_events))
return NOTIFY_DONE;
return NMI_DONE;
switch (cmd) {
case DIE_NMI:
break;
case DIE_NMIUNKNOWN:
this_nmi = percpu_read(irq_stat.__nmi_count);
if (this_nmi != __this_cpu_read(pmu_nmi.marked))
/* let the kernel handle the unknown nmi */
return NOTIFY_DONE;
/*
* This one is a PMU back-to-back nmi. Two events
* trigger 'simultaneously' raising two back-to-back
* NMIs. If the first NMI handles both, the latter
* will be empty and daze the CPU. So, we drop it to
* avoid false-positive 'unknown nmi' messages.
*/
return NOTIFY_STOP;
default:
return NOTIFY_DONE;
}
handled = x86_pmu.handle_irq(args->regs);
if (!handled)
return NOTIFY_DONE;
this_nmi = percpu_read(irq_stat.__nmi_count);
if ((handled > 1) ||
/* the next nmi could be a back-to-back nmi */
((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
(__this_cpu_read(pmu_nmi.handled) > 1))) {
/*
* We could have two subsequent back-to-back nmis: The
* first handles more than one counter, the 2nd
* handles only one counter and the 3rd handles no
* counter.
*
* This is the 2nd nmi because the previous was
* handling more than one counter. We will mark the
* next (3rd) and then drop it if unhandled.
*/
__this_cpu_write(pmu_nmi.marked, this_nmi + 1);
__this_cpu_write(pmu_nmi.handled, handled);
}
return NOTIFY_STOP;
return x86_pmu.handle_irq(regs);
}
static __read_mostly struct notifier_block perf_event_nmi_notifier = {
.notifier_call = perf_event_nmi_handler,
.next = NULL,
.priority = NMI_LOCAL_LOW_PRIOR,
};
static struct event_constraint unconstrained;
static struct event_constraint emptyconstraint;
static struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
struct event_constraint *c;
if (x86_pmu.event_constraints) {
for_each_event_constraint(c, x86_pmu.event_constraints) {
if ((event->hw.config & c->cmask) == c->code)
return c;
}
}
return &unconstrained;
}
#include "perf_event_amd.c"
#include "perf_event_p6.c"
#include "perf_event_p4.c"
#include "perf_event_intel_lbr.c"
#include "perf_event_intel_ds.c"
#include "perf_event_intel.c"
struct event_constraint emptyconstraint;
struct event_constraint unconstrained;
static int __cpuinit
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
int ret = NOTIFY_OK;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
cpuc->kfree_on_online = NULL;
if (x86_pmu.cpu_prepare)
ret = x86_pmu.cpu_prepare(cpu);
break;
@@ -1479,6 +1089,10 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
x86_pmu.cpu_starting(cpu);
break;
case CPU_ONLINE:
kfree(cpuc->kfree_on_online);
break;
case CPU_DYING:
if (x86_pmu.cpu_dying)
x86_pmu.cpu_dying(cpu);
@@ -1557,7 +1171,7 @@ static int __init init_hw_perf_events(void)
((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
perf_events_lapic_init();
register_die_notifier(&perf_event_nmi_notifier);
register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
unconstrained = (struct event_constraint)
__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,


@@ -0,0 +1,505 @@
/*
* Performance events x86 architecture header
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
* Copyright (C) 2009 Google, Inc., Stephane Eranian
*
* For licencing details see kernel-base/COPYING
*/
#include <linux/perf_event.h>
/*
* | NHM/WSM | SNB |
* register -------------------------------
* | HT | no HT | HT | no HT |
*-----------------------------------------
* offcore | core | core | cpu | core |
* lbr_sel | core | core | cpu | core |
* ld_lat | cpu | core | cpu | core |
*-----------------------------------------
*
* Given that there is a small number of shared regs,
* we can pre-allocate their slot in the per-cpu
* per-core reg tables.
*/
enum extra_reg_type {
EXTRA_REG_NONE = -1, /* not used */
EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */
EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */
EXTRA_REG_MAX /* number of entries needed */
};
struct event_constraint {
union {
unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
u64 idxmsk64;
};
u64 code;
u64 cmask;
int weight;
};
struct amd_nb {
int nb_id; /* NorthBridge id */
int refcnt; /* reference count */
struct perf_event *owners[X86_PMC_IDX_MAX];
struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};
/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS 4
/*
* A debug store configuration.
*
* We only support architectures that use 64bit fields.
*/
struct debug_store {
u64 bts_buffer_base;
u64 bts_index;
u64 bts_absolute_maximum;
u64 bts_interrupt_threshold;
u64 pebs_buffer_base;
u64 pebs_index;
u64 pebs_absolute_maximum;
u64 pebs_interrupt_threshold;
u64 pebs_event_reset[MAX_PEBS_EVENTS];
};
/*
* Per register state.
*/
struct er_account {
raw_spinlock_t lock; /* per-core: protect structure */
u64 config; /* extra MSR config */
u64 reg; /* extra MSR number */
atomic_t ref; /* reference count */
};
/*
* Per core/cpu state
*
* Used to coordinate shared registers between HT threads or
* among events on a single PMU.
*/
struct intel_shared_regs {
struct er_account regs[EXTRA_REG_MAX];
int refcnt; /* per-core: #HT threads */
unsigned core_id; /* per-core: core id */
};
#define MAX_LBR_ENTRIES 16
struct cpu_hw_events {
/*
* Generic x86 PMC bits
*/
struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
int enabled;
int n_events;
int n_added;
int n_txn;
int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
u64 tags[X86_PMC_IDX_MAX];
struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
unsigned int group_flag;
/*
* Intel DebugStore bits
*/
struct debug_store *ds;
u64 pebs_enabled;
/*
* Intel LBR bits
*/
int lbr_users;
void *lbr_context;
struct perf_branch_stack lbr_stack;
struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
/*
* Intel host/guest exclude bits
*/
u64 intel_ctrl_guest_mask;
u64 intel_ctrl_host_mask;
struct perf_guest_switch_msr guest_switch_msrs[X86_PMC_IDX_MAX];
/*
* manage shared (per-core, per-cpu) registers
* used on Intel NHM/WSM/SNB
*/
struct intel_shared_regs *shared_regs;
/*
* AMD specific bits
*/
struct amd_nb *amd_nb;
void *kfree_on_online;
};
#define __EVENT_CONSTRAINT(c, n, m, w) {\
{ .idxmsk64 = (n) }, \
.code = (c), \
.cmask = (m), \
.weight = (w), \
}
#define EVENT_CONSTRAINT(c, n, m) \
__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
/*
* Constraint on the Event code.
*/
#define INTEL_EVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
/*
* Constraint on the Event code + UMask + fixed-mask
*
* filter mask to validate fixed counter events.
* the following filters disqualify for fixed counters:
* - inv
* - edge
* - cnt-mask
* The other filters are supported by fixed counters.
* The any-thread option is supported starting with v3.
*/
#define FIXED_EVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
/*
* Constraint on the Event code + UMask
*/
#define INTEL_UEVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
#define EVENT_CONSTRAINT_END \
EVENT_CONSTRAINT(0, 0, 0)
#define for_each_event_constraint(e, c) \
for ((e) = (c); (e)->weight; (e)++)
/*
* Extra registers for specific events.
*
* Some events need large masks and require external MSRs.
* Those extra MSRs end up being shared for all events on
* a PMU and sometimes between PMU of sibling HT threads.
* In either case, the kernel needs to handle conflicting
* accesses to those extra, shared, regs. The data structure
* to manage those registers is stored in cpu_hw_event.
*/
struct extra_reg {
unsigned int event;
unsigned int msr;
u64 config_mask;
u64 valid_mask;
int idx; /* per_xxx->regs[] reg index */
};
#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
.event = (e), \
.msr = (ms), \
.config_mask = (m), \
.valid_mask = (vm), \
.idx = EXTRA_REG_##i \
}
#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
union perf_capabilities {
struct {
u64 lbr_format:6;
u64 pebs_trap:1;
u64 pebs_arch_reg:1;
u64 pebs_format:4;
u64 smm_freeze:1;
};
u64 capabilities;
};
/*
* struct x86_pmu - generic x86 pmu
*/
struct x86_pmu {
/*
* Generic x86 PMC bits
*/
const char *name;
int version;
int (*handle_irq)(struct pt_regs *);
void (*disable_all)(void);
void (*enable_all)(int added);
void (*enable)(struct perf_event *);
void (*disable)(struct perf_event *);
int (*hw_config)(struct perf_event *event);
int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
unsigned eventsel;
unsigned perfctr;
u64 (*event_map)(int);
int max_events;
int num_counters;
int num_counters_fixed;
int cntval_bits;
u64 cntval_mask;
int apic;
u64 max_period;
struct event_constraint *
(*get_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
void (*put_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
struct event_constraint *event_constraints;
void (*quirks)(void);
int perfctr_second_write;
int (*cpu_prepare)(int cpu);
void (*cpu_starting)(int cpu);
void (*cpu_dying)(int cpu);
void (*cpu_dead)(int cpu);
/*
* Intel Arch Perfmon v2+
*/
u64 intel_ctrl;
union perf_capabilities intel_cap;
/*
* Intel DebugStore bits
*/
int bts, pebs;
int bts_active, pebs_active;
int pebs_record_size;
void (*drain_pebs)(struct pt_regs *regs);
struct event_constraint *pebs_constraints;
/*
* Intel LBR
*/
unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
int lbr_nr; /* hardware stack size */
/*
* Extra registers for events
*/
struct extra_reg *extra_regs;
unsigned int er_flags;
/*
* Intel host/guest support (KVM)
*/
struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};
#define ERF_NO_HT_SHARING 1
#define ERF_HAS_RSP_1 2
extern struct x86_pmu x86_pmu __read_mostly;
DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
int x86_perf_event_set_period(struct perf_event *event);
/*
* Generalized hw caching related hw_event table, filled
* in on a per model basis. A value of 0 means
* 'not supported', -1 means 'hw_event makes no sense on
* this CPU', any other value means the raw hw_event
* ID.
*/
#define C(x) PERF_COUNT_HW_CACHE_##x
extern u64 __read_mostly hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
u64 x86_perf_event_update(struct perf_event *event);
static inline int x86_pmu_addr_offset(int index)
{
int offset;
/* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
alternative_io(ASM_NOP2,
"shll $1, %%eax",
X86_FEATURE_PERFCTR_CORE,
"=a" (offset),
"a" (index));
return offset;
}
static inline unsigned int x86_pmu_config_addr(int index)
{
return x86_pmu.eventsel + x86_pmu_addr_offset(index);
}
static inline unsigned int x86_pmu_event_addr(int index)
{
return x86_pmu.perfctr + x86_pmu_addr_offset(index);
}
int x86_setup_perfctr(struct perf_event *event);
int x86_pmu_hw_config(struct perf_event *event);
void x86_pmu_disable_all(void);
static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
u64 enable_mask)
{
if (hwc->extra_reg.reg)
wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
wrmsrl(hwc->config_base, hwc->config | enable_mask);
}
void x86_pmu_enable_all(int added);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
void x86_pmu_stop(struct perf_event *event, int flags);
static inline void x86_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
wrmsrl(hwc->config_base, hwc->config);
}
void x86_pmu_enable_event(struct perf_event *event);
int x86_pmu_handle_irq(struct pt_regs *regs);
extern struct event_constraint emptyconstraint;
extern struct event_constraint unconstrained;
#ifdef CONFIG_CPU_SUP_AMD
int amd_pmu_init(void);
#else /* CONFIG_CPU_SUP_AMD */
static inline int amd_pmu_init(void)
{
return 0;
}
#endif /* CONFIG_CPU_SUP_AMD */
#ifdef CONFIG_CPU_SUP_INTEL
int intel_pmu_save_and_restart(struct perf_event *event);
struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event);
struct intel_shared_regs *allocate_shared_regs(int cpu);
int intel_pmu_init(void);
void init_debug_store_on_cpu(int cpu);
void fini_debug_store_on_cpu(int cpu);
void release_ds_buffers(void);
void reserve_ds_buffers(void);
extern struct event_constraint bts_constraint;
void intel_pmu_enable_bts(u64 config);
void intel_pmu_disable_bts(void);
int intel_pmu_drain_bts_buffer(void);
extern struct event_constraint intel_core2_pebs_event_constraints[];
extern struct event_constraint intel_atom_pebs_event_constraints[];
extern struct event_constraint intel_nehalem_pebs_event_constraints[];
extern struct event_constraint intel_westmere_pebs_event_constraints[];
extern struct event_constraint intel_snb_pebs_event_constraints[];
struct event_constraint *intel_pebs_constraints(struct perf_event *event);
void intel_pmu_pebs_enable(struct perf_event *event);
void intel_pmu_pebs_disable(struct perf_event *event);
void intel_pmu_pebs_enable_all(void);
void intel_pmu_pebs_disable_all(void);
void intel_ds_init(void);
void intel_pmu_lbr_reset(void);
void intel_pmu_lbr_enable(struct perf_event *event);
void intel_pmu_lbr_disable(struct perf_event *event);
void intel_pmu_lbr_enable_all(void);
void intel_pmu_lbr_disable_all(void);
void intel_pmu_lbr_read(void);
void intel_pmu_lbr_init_core(void);
void intel_pmu_lbr_init_nhm(void);
void intel_pmu_lbr_init_atom(void);
int p4_pmu_init(void);
int p6_pmu_init(void);
#else /* CONFIG_CPU_SUP_INTEL */
static inline void reserve_ds_buffers(void)
{
}
static inline void release_ds_buffers(void)
{
}
static inline int intel_pmu_init(void)
{
return 0;
}
static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
return NULL;
}
#endif /* CONFIG_CPU_SUP_INTEL */


@@ -1,4 +1,10 @@
#ifdef CONFIG_CPU_SUP_AMD
#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/apicdef.h>
#include "perf_event.h"
static __initconst const u64 amd_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
@@ -132,6 +138,19 @@ static int amd_pmu_hw_config(struct perf_event *event)
if (ret)
return ret;
if (event->attr.exclude_host && event->attr.exclude_guest)
/*
* When HO == GO == 1 the hardware treats that as GO == HO == 0
* and will count in both modes. We don't want to count in that
* case so we emulate no-counting by setting US = OS = 0.
*/
event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
ARCH_PERFMON_EVENTSEL_OS);
else if (event->attr.exclude_host)
event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY;
else if (event->attr.exclude_guest)
event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY;
if (event->attr.type != PERF_TYPE_RAW)
return 0;
@@ -350,7 +369,7 @@ static void amd_pmu_cpu_starting(int cpu)
continue;
if (nb->nb_id == nb_id) {
kfree(cpuc->amd_nb);
cpuc->kfree_on_online = cpuc->amd_nb;
cpuc->amd_nb = nb;
break;
}
@@ -392,7 +411,7 @@ static __initconst const struct x86_pmu amd_pmu = {
.perfctr = MSR_K7_PERFCTR0,
.event_map = amd_pmu_event_map,
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
.num_counters = 4,
.num_counters = AMD64_NUM_COUNTERS,
.cntval_bits = 48,
.cntval_mask = (1ULL << 48) - 1,
.apic = 1,
@@ -556,7 +575,7 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
.perfctr = MSR_F15H_PERF_CTR,
.event_map = amd_pmu_event_map,
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
.num_counters = 6,
.num_counters = AMD64_NUM_COUNTERS_F15H,
.cntval_bits = 48,
.cntval_mask = (1ULL << 48) - 1,
.apic = 1,
@@ -573,7 +592,7 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
#endif
};
static __init int amd_pmu_init(void)
__init int amd_pmu_init(void)
{
/* Performance-monitoring supported from K7 and later: */
if (boot_cpu_data.x86 < 6)
@@ -602,12 +621,3 @@ static __init int amd_pmu_init(void)
return 0;
}
#else /* CONFIG_CPU_SUP_AMD */
static int amd_pmu_init(void)
{
return 0;
}
#endif


@@ -0,0 +1,294 @@
/*
* Performance events - AMD IBS
*
* Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
*
* For licencing details see kernel-base/COPYING
*/
#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <asm/apic.h>
static u32 ibs_caps;
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
static struct pmu perf_ibs;
static int perf_ibs_init(struct perf_event *event)
{
if (perf_ibs.type != event->attr.type)
return -ENOENT;
return 0;
}
static int perf_ibs_add(struct perf_event *event, int flags)
{
return 0;
}
static void perf_ibs_del(struct perf_event *event, int flags)
{
}
static struct pmu perf_ibs = {
.event_init = perf_ibs_init,
.add = perf_ibs_add,
.del = perf_ibs_del,
};
static __init int perf_event_ibs_init(void)
{
if (!ibs_caps)
return -ENODEV; /* ibs not supported by the cpu */
perf_pmu_register(&perf_ibs, "ibs", -1);
printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);
return 0;
}
#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */
static __init int perf_event_ibs_init(void) { return 0; }
#endif
/* IBS - apic initialization, for perf and oprofile */
static __init u32 __get_ibs_caps(void)
{
u32 caps;
unsigned int max_level;
if (!boot_cpu_has(X86_FEATURE_IBS))
return 0;
/* check IBS cpuid feature flags */
max_level = cpuid_eax(0x80000000);
if (max_level < IBS_CPUID_FEATURES)
return IBS_CAPS_DEFAULT;
caps = cpuid_eax(IBS_CPUID_FEATURES);
if (!(caps & IBS_CAPS_AVAIL))
/* cpuid flags not valid */
return IBS_CAPS_DEFAULT;
return caps;
}
u32 get_ibs_caps(void)
{
return ibs_caps;
}
EXPORT_SYMBOL(get_ibs_caps);
static inline int get_eilvt(int offset)
{
return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}
static inline int put_eilvt(int offset)
{
return !setup_APIC_eilvt(offset, 0, 0, 1);
}
/*
* Check and reserve APIC extended interrupt LVT offset for IBS if available.
*/
static inline int ibs_eilvt_valid(void)
{
int offset;
u64 val;
int valid = 0;
preempt_disable();
rdmsrl(MSR_AMD64_IBSCTL, val);
offset = val & IBSCTL_LVT_OFFSET_MASK;
if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
goto out;
}
if (!get_eilvt(offset)) {
pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
goto out;
}
valid = 1;
out:
preempt_enable();
return valid;
}
static int setup_ibs_ctl(int ibs_eilvt_off)
{
struct pci_dev *cpu_cfg;
int nodes;
u32 value = 0;
nodes = 0;
cpu_cfg = NULL;
do {
cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_10H_NB_MISC,
cpu_cfg);
if (!cpu_cfg)
break;
++nodes;
pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
| IBSCTL_LVT_OFFSET_VALID);
pci_read_config_dword(cpu_cfg, IBSCTL, &value);
if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
pci_dev_put(cpu_cfg);
printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
"IBSCTL = 0x%08x\n", value);
return -EINVAL;
}
} while (1);
if (!nodes) {
printk(KERN_DEBUG "No CPU node configured for IBS\n");
return -ENODEV;
}
return 0;
}
/*
* This runs only on the current cpu. We try to find an LVT offset and
* setup the local APIC. For this we must disable preemption. On
* success we initialize all nodes with this offset. This updates then
* the offset in the IBS_CTL per-node msr. The per-core APIC setup of
* the IBS interrupt vector is handled by perf_ibs_cpu_notifier that
* is using the new offset.
*/
static int force_ibs_eilvt_setup(void)
{
int offset;
int ret;
preempt_disable();
/* find the next free available EILVT entry, skip offset 0 */
for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
if (get_eilvt(offset))
break;
}
preempt_enable();
if (offset == APIC_EILVT_NR_MAX) {
printk(KERN_DEBUG "No EILVT entry available\n");
return -EBUSY;
}
ret = setup_ibs_ctl(offset);
if (ret)
goto out;
if (!ibs_eilvt_valid()) {
ret = -EFAULT;
goto out;
}
pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
return 0;
out:
preempt_disable();
put_eilvt(offset);
preempt_enable();
return ret;
}
static inline int get_ibs_lvt_offset(void)
{
u64 val;
rdmsrl(MSR_AMD64_IBSCTL, val);
if (!(val & IBSCTL_LVT_OFFSET_VALID))
return -EINVAL;
return val & IBSCTL_LVT_OFFSET_MASK;
}
static void setup_APIC_ibs(void *dummy)
{
int offset;
offset = get_ibs_lvt_offset();
if (offset < 0)
goto failed;
if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
return;
failed:
pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
smp_processor_id());
}
static void clear_APIC_ibs(void *dummy)
{
int offset;
offset = get_ibs_lvt_offset();
if (offset >= 0)
setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}
static int __cpuinit
perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_STARTING:
setup_APIC_ibs(NULL);
break;
case CPU_DYING:
clear_APIC_ibs(NULL);
break;
default:
break;
}
return NOTIFY_OK;
}
static __init int amd_ibs_init(void)
{
u32 caps;
int ret;
caps = __get_ibs_caps();
if (!caps)
return -ENODEV; /* ibs not supported by the cpu */
if (!ibs_eilvt_valid()) {
ret = force_ibs_eilvt_setup();
if (ret) {
pr_err("Failed to setup IBS, %d\n", ret);
return ret;
}
}
get_online_cpus();
ibs_caps = caps;
/* make ibs_caps visible to other cpus: */
smp_mb();
perf_cpu_notifier(perf_ibs_cpu_notifier);
smp_call_function(setup_APIC_ibs, NULL, 1);
put_online_cpus();
return perf_event_ibs_init();
}
/* Since we need the pci subsystem to init ibs we can't do this earlier: */
device_initcall(amd_ibs_init);


@@ -1,16 +1,19 @@
#ifdef CONFIG_CPU_SUP_INTEL
/*
* Per core/cpu state
*
* Used to coordinate shared registers between HT threads or
* among events on a single PMU.
*/
struct intel_shared_regs {
struct er_account regs[EXTRA_REG_MAX];
int refcnt; /* per-core: #HT threads */
unsigned core_id; /* per-core: core id */
};
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/hardirq.h>
#include <asm/apic.h>
#include "perf_event.h"
/*
* Intel PerfMon, used on Core and later.
@@ -746,7 +749,8 @@ static void intel_pmu_enable_all(int added)
intel_pmu_pebs_enable_all();
intel_pmu_lbr_enable_all();
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
struct perf_event *event =
@@ -869,6 +873,7 @@ static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
static void intel_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
intel_pmu_disable_bts();
@@ -876,6 +881,9 @@ static void intel_pmu_disable_event(struct perf_event *event)
return;
}
cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
intel_pmu_disable_fixed(hwc);
return;
@@ -921,6 +929,7 @@ static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
static void intel_pmu_enable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
if (!__this_cpu_read(cpu_hw_events.enabled))
@@ -930,6 +939,11 @@ static void intel_pmu_enable_event(struct perf_event *event)
return;
}
if (event->attr.exclude_host)
cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
if (event->attr.exclude_guest)
cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
intel_pmu_enable_fixed(hwc);
return;
@@ -945,7 +959,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
* Save and restart an expired event. Called by NMI contexts,
* so it has to be careful about preempting normal event ops:
*/
static int intel_pmu_save_and_restart(struct perf_event *event)
int intel_pmu_save_and_restart(struct perf_event *event)
{
x86_perf_event_update(event);
return x86_perf_event_set_period(event);
@@ -1197,6 +1211,21 @@ intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
return c;
}
struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
struct event_constraint *c;
if (x86_pmu.event_constraints) {
for_each_event_constraint(c, x86_pmu.event_constraints) {
if ((event->hw.config & c->cmask) == c->code)
return c;
}
}
return &unconstrained;
}
static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
@@ -1284,12 +1313,84 @@ static int intel_pmu_hw_config(struct perf_event *event)
return 0;
}
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
if (x86_pmu.guest_get_msrs)
return x86_pmu.guest_get_msrs(nr);
*nr = 0;
return NULL;
}
EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
*nr = 1;
return arr;
}
static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
struct perf_event *event = cpuc->events[idx];
arr[idx].msr = x86_pmu_config_addr(idx);
arr[idx].host = arr[idx].guest = 0;
if (!test_bit(idx, cpuc->active_mask))
continue;
arr[idx].host = arr[idx].guest =
event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
if (event->attr.exclude_host)
arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
else if (event->attr.exclude_guest)
arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
}
*nr = x86_pmu.num_counters;
return arr;
}
static void core_pmu_enable_event(struct perf_event *event)
{
if (!event->attr.exclude_host)
x86_pmu_enable_event(event);
}
static void core_pmu_enable_all(int added)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
if (!test_bit(idx, cpuc->active_mask) ||
cpuc->events[idx]->attr.exclude_host)
continue;
__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
}
static __initconst const struct x86_pmu core_pmu = {
.name = "core",
.handle_irq = x86_pmu_handle_irq,
.disable_all = x86_pmu_disable_all,
.enable_all = x86_pmu_enable_all,
.enable = x86_pmu_enable_event,
.enable_all = core_pmu_enable_all,
.enable = core_pmu_enable_event,
.disable = x86_pmu_disable_event,
.hw_config = x86_pmu_hw_config,
.schedule_events = x86_schedule_events,
@@ -1307,9 +1408,10 @@ static __initconst const struct x86_pmu core_pmu = {
.get_event_constraints = intel_get_event_constraints,
.put_event_constraints = intel_put_event_constraints,
.event_constraints = intel_core_event_constraints,
.guest_get_msrs = core_guest_get_msrs,
};
static struct intel_shared_regs *allocate_shared_regs(int cpu)
struct intel_shared_regs *allocate_shared_regs(int cpu)
{
struct intel_shared_regs *regs;
int i;
@@ -1362,7 +1464,7 @@ static void intel_pmu_cpu_starting(int cpu)
pc = per_cpu(cpu_hw_events, i).shared_regs;
if (pc && pc->core_id == core_id) {
kfree(cpuc->shared_regs);
cpuc->kfree_on_online = cpuc->shared_regs;
cpuc->shared_regs = pc;
break;
}
@@ -1413,6 +1515,7 @@ static __initconst const struct x86_pmu intel_pmu = {
.cpu_prepare = intel_pmu_cpu_prepare,
.cpu_starting = intel_pmu_cpu_starting,
.cpu_dying = intel_pmu_cpu_dying,
.guest_get_msrs = intel_guest_get_msrs,
};
static void intel_clovertown_quirks(void)
@@ -1441,7 +1544,7 @@ static void intel_clovertown_quirks(void)
x86_pmu.pebs_constraints = NULL;
}
static __init int intel_pmu_init(void)
__init int intel_pmu_init(void)
{
union cpuid10_edx edx;
union cpuid10_eax eax;
@@ -1597,7 +1700,7 @@ static __init int intel_pmu_init(void)
intel_pmu_lbr_init_nhm();
x86_pmu.event_constraints = intel_snb_event_constraints;
x86_pmu.pebs_constraints = intel_snb_pebs_events;
x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
x86_pmu.extra_regs = intel_snb_extra_regs;
/* all extra regs are per-cpu when HT is on */
x86_pmu.er_flags |= ERF_HAS_RSP_1;
@@ -1628,16 +1731,3 @@ static __init int intel_pmu_init(void)
}
return 0;
}
#else /* CONFIG_CPU_SUP_INTEL */
static int intel_pmu_init(void)
{
return 0;
}
static struct intel_shared_regs *allocate_shared_regs(int cpu)
{
return NULL;
}
#endif /* CONFIG_CPU_SUP_INTEL */


@@ -1,7 +1,10 @@
#ifdef CONFIG_CPU_SUP_INTEL
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>
/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS 4
#include <asm/perf_event.h>
#include "perf_event.h"
/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE 24
@@ -37,24 +40,7 @@ struct pebs_record_nhm {
u64 status, dla, dse, lat;
};
/*
* A debug store configuration.
*
* We only support architectures that use 64bit fields.
*/
struct debug_store {
u64 bts_buffer_base;
u64 bts_index;
u64 bts_absolute_maximum;
u64 bts_interrupt_threshold;
u64 pebs_buffer_base;
u64 pebs_index;
u64 pebs_absolute_maximum;
u64 pebs_interrupt_threshold;
u64 pebs_event_reset[MAX_PEBS_EVENTS];
};
static void init_debug_store_on_cpu(int cpu)
void init_debug_store_on_cpu(int cpu)
{
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
@@ -66,7 +52,7 @@ static void init_debug_store_on_cpu(int cpu)
(u32)((u64)(unsigned long)ds >> 32));
}
static void fini_debug_store_on_cpu(int cpu)
void fini_debug_store_on_cpu(int cpu)
{
if (!per_cpu(cpu_hw_events, cpu).ds)
return;
@@ -175,7 +161,7 @@ static void release_ds_buffer(int cpu)
kfree(ds);
}
static void release_ds_buffers(void)
void release_ds_buffers(void)
{
int cpu;
@@ -194,7 +180,7 @@ static void release_ds_buffers(void)
put_online_cpus();
}
static void reserve_ds_buffers(void)
void reserve_ds_buffers(void)
{
int bts_err = 0, pebs_err = 0;
int cpu;
@@ -260,10 +246,10 @@ static void reserve_ds_buffers(void)
* BTS
*/
static struct event_constraint bts_constraint =
struct event_constraint bts_constraint =
EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
static void intel_pmu_enable_bts(u64 config)
void intel_pmu_enable_bts(u64 config)
{
unsigned long debugctlmsr;
@@ -282,7 +268,7 @@ static void intel_pmu_enable_bts(u64 config)
update_debugctlmsr(debugctlmsr);
}
static void intel_pmu_disable_bts(void)
void intel_pmu_disable_bts(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
unsigned long debugctlmsr;
@@ -299,7 +285,7 @@ static void intel_pmu_disable_bts(void)
update_debugctlmsr(debugctlmsr);
}
static int intel_pmu_drain_bts_buffer(void)
int intel_pmu_drain_bts_buffer(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct debug_store *ds = cpuc->ds;
@@ -361,7 +347,7 @@ static int intel_pmu_drain_bts_buffer(void)
/*
* PEBS
*/
static struct event_constraint intel_core2_pebs_event_constraints[] = {
struct event_constraint intel_core2_pebs_event_constraints[] = {
INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
@@ -370,14 +356,14 @@ static struct event_constraint intel_core2_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_atom_pebs_event_constraints[] = {
struct event_constraint intel_atom_pebs_event_constraints[] = {
INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_nehalem_pebs_event_constraints[] = {
struct event_constraint intel_nehalem_pebs_event_constraints[] = {
INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
@@ -392,7 +378,7 @@ static struct event_constraint intel_nehalem_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_westmere_pebs_event_constraints[] = {
struct event_constraint intel_westmere_pebs_event_constraints[] = {
INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
@@ -407,7 +393,7 @@ static struct event_constraint intel_westmere_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_snb_pebs_events[] = {
struct event_constraint intel_snb_pebs_event_constraints[] = {
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
@@ -428,8 +414,7 @@ static struct event_constraint intel_snb_pebs_events[] = {
EVENT_CONSTRAINT_END
};
static struct event_constraint *
intel_pebs_constraints(struct perf_event *event)
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
struct event_constraint *c;
@@ -446,7 +431,7 @@ intel_pebs_constraints(struct perf_event *event)
return &emptyconstraint;
}
static void intel_pmu_pebs_enable(struct perf_event *event)
void intel_pmu_pebs_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
@@ -460,7 +445,7 @@ static void intel_pmu_pebs_enable(struct perf_event *event)
intel_pmu_lbr_enable(event);
}
static void intel_pmu_pebs_disable(struct perf_event *event)
void intel_pmu_pebs_disable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
@@ -475,7 +460,7 @@ static void intel_pmu_pebs_disable(struct perf_event *event)
intel_pmu_lbr_disable(event);
}
static void intel_pmu_pebs_enable_all(void)
void intel_pmu_pebs_enable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -483,7 +468,7 @@ static void intel_pmu_pebs_enable_all(void)
wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}
static void intel_pmu_pebs_disable_all(void)
void intel_pmu_pebs_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -576,8 +561,6 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
return 0;
}
static int intel_pmu_save_and_restart(struct perf_event *event);
static void __intel_pmu_pebs_event(struct perf_event *event,
struct pt_regs *iregs, void *__pebs)
{
@@ -716,7 +699,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
* BTS, PEBS probe and setup
*/
static void intel_ds_init(void)
void intel_ds_init(void)
{
/*
* No support for 32bit formats
@@ -749,15 +732,3 @@ static void intel_ds_init(void)
}
}
}
#else /* CONFIG_CPU_SUP_INTEL */
static void reserve_ds_buffers(void)
{
}
static void release_ds_buffers(void)
{
}
#endif /* CONFIG_CPU_SUP_INTEL */


@@ -1,4 +1,10 @@
#ifdef CONFIG_CPU_SUP_INTEL
#include <linux/perf_event.h>
#include <linux/types.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#include "perf_event.h"
enum {
LBR_FORMAT_32 = 0x00,
@@ -48,7 +54,7 @@ static void intel_pmu_lbr_reset_64(void)
}
}
static void intel_pmu_lbr_reset(void)
void intel_pmu_lbr_reset(void)
{
if (!x86_pmu.lbr_nr)
return;
@@ -59,7 +65,7 @@ static void intel_pmu_lbr_reset(void)
intel_pmu_lbr_reset_64();
}
static void intel_pmu_lbr_enable(struct perf_event *event)
void intel_pmu_lbr_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -81,7 +87,7 @@ static void intel_pmu_lbr_enable(struct perf_event *event)
cpuc->lbr_users++;
}
static void intel_pmu_lbr_disable(struct perf_event *event)
void intel_pmu_lbr_disable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -95,7 +101,7 @@ static void intel_pmu_lbr_disable(struct perf_event *event)
__intel_pmu_lbr_disable();
}
static void intel_pmu_lbr_enable_all(void)
void intel_pmu_lbr_enable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -103,7 +109,7 @@ static void intel_pmu_lbr_enable_all(void)
__intel_pmu_lbr_enable();
}
static void intel_pmu_lbr_disable_all(void)
void intel_pmu_lbr_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -178,7 +184,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
cpuc->lbr_stack.nr = i;
}
static void intel_pmu_lbr_read(void)
void intel_pmu_lbr_read(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -191,7 +197,7 @@ static void intel_pmu_lbr_read(void)
intel_pmu_lbr_read_64(cpuc);
}
static void intel_pmu_lbr_init_core(void)
void intel_pmu_lbr_init_core(void)
{
x86_pmu.lbr_nr = 4;
x86_pmu.lbr_tos = 0x01c9;
@@ -199,7 +205,7 @@ static void intel_pmu_lbr_init_core(void)
x86_pmu.lbr_to = 0x60;
}
static void intel_pmu_lbr_init_nhm(void)
void intel_pmu_lbr_init_nhm(void)
{
x86_pmu.lbr_nr = 16;
x86_pmu.lbr_tos = 0x01c9;
@@ -207,12 +213,10 @@ static void intel_pmu_lbr_init_nhm(void)
x86_pmu.lbr_to = 0x6c0;
}
static void intel_pmu_lbr_init_atom(void)
void intel_pmu_lbr_init_atom(void)
{
x86_pmu.lbr_nr = 8;
x86_pmu.lbr_tos = 0x01c9;
x86_pmu.lbr_from = 0x40;
x86_pmu.lbr_to = 0x60;
}
#endif /* CONFIG_CPU_SUP_INTEL */

View File

@@ -7,9 +7,13 @@
* For licencing details see kernel-base/COPYING
*/
#ifdef CONFIG_CPU_SUP_INTEL
#include <linux/perf_event.h>
#include <asm/perf_event_p4.h>
#include <asm/hardirq.h>
#include <asm/apic.h>
#include "perf_event.h"
#define P4_CNTR_LIMIT 3
/*
@@ -1303,7 +1307,7 @@ static __initconst const struct x86_pmu p4_pmu = {
.perfctr_second_write = 1,
};
static __init int p4_pmu_init(void)
__init int p4_pmu_init(void)
{
unsigned int low, high;
@@ -1326,5 +1330,3 @@ static __init int p4_pmu_init(void)
return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */

View File

@@ -1,4 +1,7 @@
#ifdef CONFIG_CPU_SUP_INTEL
#include <linux/perf_event.h>
#include <linux/types.h>
#include "perf_event.h"
/*
* Not sure about some of these
@@ -114,7 +117,7 @@ static __initconst const struct x86_pmu p6_pmu = {
.event_constraints = p6_event_constraints,
};
static __init int p6_pmu_init(void)
__init int p6_pmu_init(void)
{
switch (boot_cpu_data.x86_model) {
case 1:
@@ -138,5 +141,3 @@ static __init int p6_pmu_init(void)
return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */

View File

@@ -32,15 +32,12 @@ int in_crash_kexec;
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
static void kdump_nmi_callback(int cpu, struct die_args *args)
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
struct pt_regs *regs;
#ifdef CONFIG_X86_32
struct pt_regs fixed_regs;
#endif
regs = args->regs;
#ifdef CONFIG_X86_32
if (!user_mode_vm(regs)) {
crash_fixup_ss_esp(&fixed_regs, regs);

View File

@@ -42,7 +42,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
put_online_cpus();
}
void arch_jump_label_text_poke_early(jump_label_t addr)
void __init_or_module arch_jump_label_text_poke_early(jump_label_t addr)
{
text_poke_early((void *)addr, ideal_nops[NOP_ATOMIC5],
JUMP_LABEL_NOP_SIZE);

View File

@@ -511,28 +511,37 @@ single_step_cont(struct pt_regs *regs, struct die_args *args)
static int was_in_debug_nmi[NR_CPUS];
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
static int kgdb_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
struct pt_regs *regs = args->regs;
switch (cmd) {
case DIE_NMI:
case NMI_LOCAL:
if (atomic_read(&kgdb_active) != -1) {
/* KGDB CPU roundup */
kgdb_nmicallback(raw_smp_processor_id(), regs);
was_in_debug_nmi[raw_smp_processor_id()] = 1;
touch_nmi_watchdog();
return NOTIFY_STOP;
return NMI_HANDLED;
}
return NOTIFY_DONE;
break;
case DIE_NMIUNKNOWN:
case NMI_UNKNOWN:
if (was_in_debug_nmi[raw_smp_processor_id()]) {
was_in_debug_nmi[raw_smp_processor_id()] = 0;
return NOTIFY_STOP;
return NMI_HANDLED;
}
return NOTIFY_DONE;
break;
default:
/* do nothing */
break;
}
return NMI_DONE;
}
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
struct pt_regs *regs = args->regs;
switch (cmd) {
case DIE_DEBUG:
if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
if (user_mode(regs))
@@ -590,11 +599,6 @@ kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
static struct notifier_block kgdb_notifier = {
.notifier_call = kgdb_notify,
/*
* Lowest-prio notifier priority, we want to be notified last:
*/
.priority = NMI_LOCAL_LOW_PRIOR,
};
/**
@@ -605,7 +609,31 @@ static struct notifier_block kgdb_notifier = {
*/
int kgdb_arch_init(void)
{
return register_die_notifier(&kgdb_notifier);
int retval;
retval = register_die_notifier(&kgdb_notifier);
if (retval)
goto out;
retval = register_nmi_handler(NMI_LOCAL, kgdb_nmi_handler,
0, "kgdb");
if (retval)
goto out1;
retval = register_nmi_handler(NMI_UNKNOWN, kgdb_nmi_handler,
0, "kgdb");
if (retval)
goto out2;
return retval;
out2:
unregister_nmi_handler(NMI_LOCAL, "kgdb");
out1:
unregister_die_notifier(&kgdb_notifier);
out:
return retval;
}
static void kgdb_hw_overflow_handler(struct perf_event *event,
@@ -673,6 +701,8 @@ void kgdb_arch_exit(void)
breakinfo[i].pev = NULL;
}
}
unregister_nmi_handler(NMI_UNKNOWN, "kgdb");
unregister_nmi_handler(NMI_LOCAL, "kgdb");
unregister_die_notifier(&kgdb_notifier);
}
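The kgdb changes above follow the conversion pattern applied throughout this series: a DIE_NMI die-notifier callback that received a struct die_args and answered NOTIFY_STOP/NOTIFY_DONE becomes an nmi_handler_t that is handed the NMI type and the pt_regs directly and answers NMI_HANDLED/NMI_DONE. A minimal before/after sketch; my_hw_claimed() and the handler names are hypothetical, not part of this commit:

/* old style: hooked into the die notifier chain */
static int old_nmi_notify(struct notifier_block *self,
			  unsigned long cmd, void *data)
{
	struct pt_regs *regs = ((struct die_args *)data)->regs;

	if (cmd != DIE_NMI)
		return NOTIFY_DONE;
	return my_hw_claimed(regs) ? NOTIFY_STOP : NOTIFY_DONE;
}

/* new style: registered via register_nmi_handler(NMI_LOCAL, ...) */
static int new_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	return my_hw_claimed(regs) ? NMI_HANDLED : NMI_DONE;
}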

View File

@@ -75,10 +75,11 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
/*
* Undefined/reserved opcodes, conditional jump, Opcode Extension
* Groups, and some special opcodes can not boost.
* This is non-const to keep gcc from statically optimizing it out, as
* variable_test_bit makes gcc think only *(unsigned long*) is used.
* This is non-const and volatile to keep gcc from statically
* optimizing it out, as variable_test_bit makes gcc think only
* *(unsigned long*) is used.
*/
static u32 twobyte_is_boostable[256 / 32] = {
static volatile u32 twobyte_is_boostable[256 / 32] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* ---------------------------------------------- */
W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
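For context on the comment above: the table is only read through test_bit()-style helpers, and variable_test_bit()'s inline assembly makes gcc think only a *(unsigned long *) is used, so a plain or const array can look unused and be optimized away. A simplified sketch of the access pattern, assuming the lookup is shaped like kprobes' can_boost(), which is not part of this hunk:

static volatile u32 twobyte_is_boostable[256 / 32] = {
	/* W() bitmap rows as in the hunk above */
};

static int can_boost_twobyte(u8 opcode)
{
	/* gcc only sees the cast, not which words get read */
	return test_bit(opcode, (unsigned long *)twobyte_is_boostable);
}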

433
arch/x86/kernel/nmi.c Normal file
View File

@@ -0,0 +1,433 @@
/*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
* Copyright (C) 2011 Don Zickus Red Hat, Inc.
*
* Pentium III FXSR, SSE support
* Gareth Hughes <gareth@valinux.com>, May 2000
*/
/*
* Handle hardware traps and faults.
*/
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <linux/mca.h>
#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif
#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#define NMI_MAX_NAMELEN 16
struct nmiaction {
struct list_head list;
nmi_handler_t handler;
unsigned int flags;
char *name;
};
struct nmi_desc {
spinlock_t lock;
struct list_head head;
};
static struct nmi_desc nmi_desc[NMI_MAX] =
{
{
.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
.head = LIST_HEAD_INIT(nmi_desc[0].head),
},
{
.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
.head = LIST_HEAD_INIT(nmi_desc[1].head),
},
};
struct nmi_stats {
unsigned int normal;
unsigned int unknown;
unsigned int external;
unsigned int swallow;
};
static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);
static int ignore_nmis;
int unknown_nmi_panic;
/*
* Prevent NMI reason port (0x61) being accessed simultaneously, can
* only be used in NMI handler.
*/
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
static int __init setup_unknown_nmi_panic(char *str)
{
unknown_nmi_panic = 1;
return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
#define nmi_to_desc(type) (&nmi_desc[type])
static int notrace __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
{
struct nmi_desc *desc = nmi_to_desc(type);
struct nmiaction *a;
int handled = 0;
rcu_read_lock();
/*
* NMIs are edge-triggered, which means if you have enough
* of them concurrently, you can lose some because only one
* can be latched at any given time. Walk the whole list
* to handle those situations.
*/
list_for_each_entry_rcu(a, &desc->head, list)
handled += a->handler(type, regs);
rcu_read_unlock();
/* return total number of NMI events handled */
return handled;
}
static int __setup_nmi(unsigned int type, struct nmiaction *action)
{
struct nmi_desc *desc = nmi_to_desc(type);
unsigned long flags;
spin_lock_irqsave(&desc->lock, flags);
/*
* most handlers of type NMI_UNKNOWN never return because
* they just assume the NMI is theirs. Just a sanity check
* to manage expectations
*/
WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
/*
* some handlers need to be executed first otherwise a fake
* event confuses some handlers (kdump uses this flag)
*/
if (action->flags & NMI_FLAG_FIRST)
list_add_rcu(&action->list, &desc->head);
else
list_add_tail_rcu(&action->list, &desc->head);
spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
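The NMI_FLAG_FIRST branch above is what kdump relies on later in this commit (reboot.c registers its crash handler with that flag): head insertion means the flagged handler is walked before anything registered earlier. A small sketch of the effect; the non-crash handler is hypothetical:

	/* registered earlier, no flag: appended at the tail */
	register_nmi_handler(NMI_LOCAL, some_other_handler, 0, "other");
	/* registered later with NMI_FLAG_FIRST: inserted at the head, so on
	 * the next NMI it runs before "other" */
	register_nmi_handler(NMI_LOCAL, crash_nmi_callback, NMI_FLAG_FIRST, "crash");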
static struct nmiaction *__free_nmi(unsigned int type, const char *name)
{
struct nmi_desc *desc = nmi_to_desc(type);
struct nmiaction *n;
unsigned long flags;
spin_lock_irqsave(&desc->lock, flags);
list_for_each_entry_rcu(n, &desc->head, list) {
/*
* the name passed in to describe the nmi handler
* is used as the lookup key
*/
if (!strcmp(n->name, name)) {
WARN(in_nmi(),
"Trying to free NMI (%s) from NMI context!\n", n->name);
list_del_rcu(&n->list);
break;
}
}
spin_unlock_irqrestore(&desc->lock, flags);
synchronize_rcu();
return (n);
}
int register_nmi_handler(unsigned int type, nmi_handler_t handler,
unsigned long nmiflags, const char *devname)
{
struct nmiaction *action;
int retval = -ENOMEM;
if (!handler)
return -EINVAL;
action = kzalloc(sizeof(struct nmiaction), GFP_KERNEL);
if (!action)
goto fail_action;
action->handler = handler;
action->flags = nmiflags;
action->name = kstrndup(devname, NMI_MAX_NAMELEN, GFP_KERNEL);
if (!action->name)
goto fail_action_name;
retval = __setup_nmi(type, action);
if (retval)
goto fail_setup_nmi;
return retval;
fail_setup_nmi:
kfree(action->name);
fail_action_name:
kfree(action);
fail_action:
return retval;
}
EXPORT_SYMBOL_GPL(register_nmi_handler);
void unregister_nmi_handler(unsigned int type, const char *name)
{
struct nmiaction *a;
a = __free_nmi(type, name);
if (a) {
kfree(a->name);
kfree(a);
}
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
static notrace __kprobes void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
reason, smp_processor_id());
/*
* On some machines, PCI SERR line is used to report memory
* errors. EDAC makes use of it.
*/
#if defined(CONFIG_EDAC)
if (edac_handler_set()) {
edac_atomic_assert_error();
return;
}
#endif
if (panic_on_unrecovered_nmi)
panic("NMI: Not continuing");
pr_emerg("Dazed and confused, but trying to continue\n");
/* Clear and disable the PCI SERR error line. */
reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
outb(reason, NMI_REASON_PORT);
}
static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
unsigned long i;
pr_emerg(
"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
reason, smp_processor_id());
show_registers(regs);
if (panic_on_io_nmi)
panic("NMI IOCK error: Not continuing");
/* Re-enable the IOCK line, wait for a few seconds */
reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
outb(reason, NMI_REASON_PORT);
i = 20000;
while (--i) {
touch_nmi_watchdog();
udelay(100);
}
reason &= ~NMI_REASON_CLEAR_IOCHK;
outb(reason, NMI_REASON_PORT);
}
static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
int handled;
/*
* Use 'false' as back-to-back NMIs are dealt with one level up.
* Of course this makes having multiple 'unknown' handlers useless
* as only the first one is ever run (unless it can actually determine
* if it caused the NMI).
*/
handled = nmi_handle(NMI_UNKNOWN, regs, false);
if (handled) {
__this_cpu_add(nmi_stats.unknown, handled);
return;
}
__this_cpu_add(nmi_stats.unknown, 1);
#ifdef CONFIG_MCA
/*
* Might actually be able to figure out what the guilty party
* is:
*/
if (MCA_bus) {
mca_handle_nmi();
return;
}
#endif
pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
reason, smp_processor_id());
pr_emerg("Do you have a strange power saving mode enabled?\n");
if (unknown_nmi_panic || panic_on_unrecovered_nmi)
panic("NMI: Not continuing");
pr_emerg("Dazed and confused, but trying to continue\n");
}
static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
unsigned char reason = 0;
int handled;
bool b2b = false;
/*
* CPU-specific NMI must be processed before non-CPU-specific
* NMI, otherwise we may lose it, because the CPU-specific
* NMI can not be detected/processed on other CPUs.
*/
/*
* Back-to-back NMIs are interesting because they can either
* be two NMIs or more (anything over two is dropped
* due to NMIs being edge-triggered). If this is the second half
* of a back-to-back NMI, assume we dropped things and process
* more handlers. Otherwise reset the 'swallow' NMI behaviour.
*/
if (regs->ip == __this_cpu_read(last_nmi_rip))
b2b = true;
else
__this_cpu_write(swallow_nmi, false);
__this_cpu_write(last_nmi_rip, regs->ip);
handled = nmi_handle(NMI_LOCAL, regs, b2b);
__this_cpu_add(nmi_stats.normal, handled);
if (handled) {
/*
* There are cases when an NMI handler handles multiple
* events in the current NMI. One of these events may
* be queued to fire in the next NMI. Because the event is
* already handled, the next NMI will result in an unknown
* NMI. Instead let's flag this for a potential NMI to
* swallow.
*/
if (handled > 1)
__this_cpu_write(swallow_nmi, true);
return;
}
/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
raw_spin_lock(&nmi_reason_lock);
reason = get_nmi_reason();
if (reason & NMI_REASON_MASK) {
if (reason & NMI_REASON_SERR)
pci_serr_error(reason, regs);
else if (reason & NMI_REASON_IOCHK)
io_check_error(reason, regs);
#ifdef CONFIG_X86_32
/*
* Reassert NMI in case it became active
* meanwhile as it's edge-triggered:
*/
reassert_nmi();
#endif
__this_cpu_add(nmi_stats.external, 1);
raw_spin_unlock(&nmi_reason_lock);
return;
}
raw_spin_unlock(&nmi_reason_lock);
/*
* Only one NMI can be latched at a time. To handle
* this we may process multiple NMI handlers at once to
* cover the case where an NMI is dropped. The downside
* to this approach is we may process an NMI prematurely,
* while its real NMI is sitting latched. This will cause
* an unknown NMI on the next run of the NMI processing.
*
* We tried to flag that condition above, by setting the
* swallow_nmi flag when we process more than one event.
* This condition is also only present on the second half
* of a back-to-back NMI, so we flag that condition too.
*
* If both are true, we assume we already processed this
* NMI previously and we swallow it. Otherwise we reset
* the logic.
*
* There are scenarios where we may accidentally swallow
* a 'real' unknown NMI. For example, while processing
* a perf NMI another perf NMI comes in along with a
* 'real' unknown NMI. These two NMIs get combined into
* one (as described above). When the next NMI gets
* processed, it will be flagged by perf as handled, but
* no one will know that there was a 'real' unknown NMI sent
* also. As a result it gets swallowed. Or if the first
* perf NMI returns two events handled then the second
* NMI will get eaten by the logic below, again losing a
* 'real' unknown NMI. But this is the best we can do
* for now.
*/
if (b2b && __this_cpu_read(swallow_nmi))
__this_cpu_add(nmi_stats.swallow, 1);
else
unknown_nmi_error(reason, regs);
}
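A compact worked trace of the swallow logic above (illustrative timing; the two-event perf behaviour is assumed, not taken from this hunk):

/*
 * NMI #1 lands at ip1: nmi_handle(NMI_LOCAL, ...) returns 2 because the
 * perf handler drained two events -> handled > 1, so swallow_nmi is set
 * and last_nmi_rip = ip1.
 * NMI #2 was latched during #1 and fires as soon as #1 returns, so it
 * also sees regs->ip == ip1 -> b2b = true. No handler claims it and the
 * reason port is clear, so b2b && swallow_nmi holds and the NMI is
 * counted in nmi_stats.swallow instead of reaching unknown_nmi_error().
 */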
dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
nmi_enter();
inc_irq_stat(__nmi_count);
if (!ignore_nmis)
default_do_nmi(regs);
nmi_exit();
}
void stop_nmi(void)
{
ignore_nmis++;
}
void restart_nmi(void)
{
ignore_nmis--;
}
/* reset the back-to-back NMI logic */
void local_touch_nmi(void)
{
__this_cpu_write(last_nmi_rip, 0);
}
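The two exported entry points above replace the register_die_notifier() dance for NMI users. A hedged usage sketch; the "mydrv" handler and its hardware check are hypothetical, while the call shapes mirror the kgdb and crash registrations elsewhere in this commit:

#include <linux/module.h>
#include <asm/nmi.h>

static int mydrv_nmi(unsigned int type, struct pt_regs *regs)
{
	if (!mydrv_hw_raised_nmi())	/* hypothetical device check */
		return NMI_DONE;	/* not ours, let other handlers look */
	/* ... service the device ... */
	return NMI_HANDLED;
}

static int __init mydrv_init(void)
{
	/* flags = 0: no special placement in the handler list */
	return register_nmi_handler(NMI_LOCAL, mydrv_nmi, 0, "mydrv");
}

static void __exit mydrv_exit(void)
{
	/* teardown is keyed by the name passed at registration time */
	unregister_nmi_handler(NMI_LOCAL, "mydrv");
}

module_init(mydrv_init);
module_exit(mydrv_exit);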

View File

@@ -57,6 +57,7 @@
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
@@ -107,6 +108,7 @@ void cpu_idle(void)
if (cpu_is_offline(cpu))
play_dead();
local_touch_nmi();
local_irq_disable();
/* Don't trace irqs off for idle */
stop_critical_timings();

View File

@@ -51,6 +51,7 @@
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
asmlinkage extern void ret_from_fork(void);
@@ -133,6 +134,7 @@ void cpu_idle(void)
* from here on, until they go to idle.
* Otherwise, idle callbacks can misfire.
*/
local_touch_nmi();
local_irq_disable();
enter_idle();
/* Don't trace irqs off for idle */
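The diff does not spell out why the idle paths gain a local_touch_nmi() call; a plausible reading, inferred from the back-to-back detection in nmi.c above, is that the idle loop keeps halting at one fixed instruction, so two unrelated NMIs taken there would see the same saved IP, match last_nmi_rip, and be treated as back-to-back. Roughly, as a simplified idle iteration rather than the exact cpu_idle() code:

	local_touch_nmi();	/* last_nmi_rip = 0: the next NMI cannot look back-to-back */
	local_irq_disable();
	if (!need_resched())
		safe_halt();	/* every NMI taken here sees the same saved IP */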

View File

@@ -464,7 +464,7 @@ static inline void kb_wait(void)
}
}
static void vmxoff_nmi(int cpu, struct die_args *args)
static void vmxoff_nmi(int cpu, struct pt_regs *regs)
{
cpu_emergency_vmxoff();
}
@@ -736,14 +736,10 @@ static nmi_shootdown_cb shootdown_callback;
static atomic_t waiting_for_crash_ipi;
static int crash_nmi_callback(struct notifier_block *self,
unsigned long val, void *data)
static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
{
int cpu;
if (val != DIE_NMI)
return NOTIFY_OK;
cpu = raw_smp_processor_id();
/* Don't do anything if this handler is invoked on crashing cpu.
@@ -751,10 +747,10 @@ static int crash_nmi_callback(struct notifier_block *self,
* an NMI if system was initially booted with nmi_watchdog parameter.
*/
if (cpu == crashing_cpu)
return NOTIFY_STOP;
return NMI_HANDLED;
local_irq_disable();
shootdown_callback(cpu, (struct die_args *)data);
shootdown_callback(cpu, regs);
atomic_dec(&waiting_for_crash_ipi);
/* Assume hlt works */
@@ -762,7 +758,7 @@ static int crash_nmi_callback(struct notifier_block *self,
for (;;)
cpu_relax();
return 1;
return NMI_HANDLED;
}
static void smp_send_nmi_allbutself(void)
@@ -770,12 +766,6 @@ static void smp_send_nmi_allbutself(void)
apic->send_IPI_allbutself(NMI_VECTOR);
}
static struct notifier_block crash_nmi_nb = {
.notifier_call = crash_nmi_callback,
/* we want to be the first one called */
.priority = NMI_LOCAL_HIGH_PRIOR+1,
};
/* Halt all other CPUs, calling the specified function on each of them
*
* This function can be used to halt all other CPUs on crash
@@ -794,7 +784,8 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
/* Would it be better to replace the trap vector here? */
if (register_die_notifier(&crash_nmi_nb))
if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback,
NMI_FLAG_FIRST, "crash"))
return; /* return what? */
/* Ensure the new callback function is set before sending
* out the NMI

View File

@@ -81,15 +81,6 @@ gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);
static int ignore_nmis;
int unknown_nmi_panic;
/*
* Prevent NMI reason port (0x61) being accessed simultaneously, can
* only be used in NMI handler.
*/
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
static inline void conditional_sti(struct pt_regs *regs)
{
if (regs->flags & X86_EFLAGS_IF)
@@ -307,152 +298,6 @@ gp_in_kernel:
die("general protection fault", regs, error_code);
}
static int __init setup_unknown_nmi_panic(char *str)
{
unknown_nmi_panic = 1;
return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
static notrace __kprobes void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
reason, smp_processor_id());
/*
* On some machines, PCI SERR line is used to report memory
* errors. EDAC makes use of it.
*/
#if defined(CONFIG_EDAC)
if (edac_handler_set()) {
edac_atomic_assert_error();
return;
}
#endif
if (panic_on_unrecovered_nmi)
panic("NMI: Not continuing");
pr_emerg("Dazed and confused, but trying to continue\n");
/* Clear and disable the PCI SERR error line. */
reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
outb(reason, NMI_REASON_PORT);
}
static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
unsigned long i;
pr_emerg(
"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
reason, smp_processor_id());
show_registers(regs);
if (panic_on_io_nmi)
panic("NMI IOCK error: Not continuing");
/* Re-enable the IOCK line, wait for a few seconds */
reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
outb(reason, NMI_REASON_PORT);
i = 20000;
while (--i) {
touch_nmi_watchdog();
udelay(100);
}
reason &= ~NMI_REASON_CLEAR_IOCHK;
outb(reason, NMI_REASON_PORT);
}
static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
NOTIFY_STOP)
return;
#ifdef CONFIG_MCA
/*
* Might actually be able to figure out what the guilty party
* is:
*/
if (MCA_bus) {
mca_handle_nmi();
return;
}
#endif
pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
reason, smp_processor_id());
pr_emerg("Do you have a strange power saving mode enabled?\n");
if (unknown_nmi_panic || panic_on_unrecovered_nmi)
panic("NMI: Not continuing");
pr_emerg("Dazed and confused, but trying to continue\n");
}
static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
unsigned char reason = 0;
/*
* CPU-specific NMI must be processed before non-CPU-specific
* NMI, otherwise we may lose it, because the CPU-specific
* NMI can not be detected/processed on other CPUs.
*/
if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP)
return;
/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
raw_spin_lock(&nmi_reason_lock);
reason = get_nmi_reason();
if (reason & NMI_REASON_MASK) {
if (reason & NMI_REASON_SERR)
pci_serr_error(reason, regs);
else if (reason & NMI_REASON_IOCHK)
io_check_error(reason, regs);
#ifdef CONFIG_X86_32
/*
* Reassert NMI in case it became active
* meanwhile as it's edge-triggered:
*/
reassert_nmi();
#endif
raw_spin_unlock(&nmi_reason_lock);
return;
}
raw_spin_unlock(&nmi_reason_lock);
unknown_nmi_error(reason, regs);
}
dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
nmi_enter();
inc_irq_stat(__nmi_count);
if (!ignore_nmis)
default_do_nmi(regs);
nmi_exit();
}
void stop_nmi(void)
{
ignore_nmis++;
}
void restart_nmi(void)
{
ignore_nmis--;
}
/* May run on IST stack. */
dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{