Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf, x86: Try to handle unknown nmis with an enabled PMU
  perf, x86: Fix handle_irq return values
  perf, x86: Fix accidentally ack'ing a second event on intel perf counter
  oprofile, x86: fix init_sysfs() function stub
  lockup_detector: Sync touch_*_watchdog back to old semantics
  tracing: Fix a race in function profile
  oprofile, x86: fix init_sysfs error handling
  perf_events: Fix time tracking for events with pid != -1 and cpu != -1
  perf: Initialize callchains roots's childen hits
  oprofile: fix crash when accessing freed task structs
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1154,7 +1154,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 			/*
 			 * event overflow
 			 */
-			handled = 1;
+			handled++;
 			data.period	= event->hw.last_period;
 
 			if (!x86_perf_event_set_period(event))
@@ -1200,12 +1200,20 @@ void perf_events_lapic_init(void)
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
+struct pmu_nmi_state {
+	unsigned int	marked;
+	int		handled;
+};
+
+static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
+
 static int __kprobes
 perf_event_nmi_handler(struct notifier_block *self,
 			 unsigned long cmd, void *__args)
 {
 	struct die_args *args = __args;
-	struct pt_regs *regs;
+	unsigned int this_nmi;
+	int handled;
 
 	if (!atomic_read(&active_events))
 		return NOTIFY_DONE;
@@ -1214,22 +1222,47 @@ perf_event_nmi_handler(struct notifier_block *self,
 	case DIE_NMI:
 	case DIE_NMI_IPI:
 		break;
-
+	case DIE_NMIUNKNOWN:
+		this_nmi = percpu_read(irq_stat.__nmi_count);
+		if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+			/* let the kernel handle the unknown nmi */
+			return NOTIFY_DONE;
+		/*
+		 * This one is a PMU back-to-back nmi. Two events
+		 * trigger 'simultaneously' raising two back-to-back
+		 * NMIs. If the first NMI handles both, the latter
+		 * will be empty and daze the CPU. So, we drop it to
+		 * avoid false-positive 'unknown nmi' messages.
+		 */
+		return NOTIFY_STOP;
 	default:
 		return NOTIFY_DONE;
 	}
 
-	regs = args->regs;
-
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	/*
-	 * Can't rely on the handled return value to say it was our NMI, two
-	 * events could trigger 'simultaneously' raising two back-to-back NMIs.
-	 *
-	 * If the first NMI handles both, the latter will be empty and daze
-	 * the CPU.
-	 */
-	x86_pmu.handle_irq(regs);
+
+	handled = x86_pmu.handle_irq(args->regs);
+	if (!handled)
+		return NOTIFY_DONE;
+
+	this_nmi = percpu_read(irq_stat.__nmi_count);
+	if ((handled > 1) ||
+		/* the next nmi could be a back-to-back nmi */
+	    ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
+	     (__get_cpu_var(pmu_nmi).handled > 1))) {
+		/*
+		 * We could have two subsequent back-to-back nmis: The
+		 * first handles more than one counter, the 2nd
+		 * handles only one counter and the 3rd handles no
+		 * counter.
+		 *
+		 * This is the 2nd nmi because the previous was
+		 * handling more than one counter. We will mark the
+		 * next (3rd) and then drop it if unhandled.
+		 */
+		__get_cpu_var(pmu_nmi).marked	= this_nmi + 1;
+		__get_cpu_var(pmu_nmi).handled	= handled;
+	}
 
 	return NOTIFY_STOP;
 }
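The new DIE_NMIUNKNOWN branch and the per-CPU marked/handled pair above are the heart of the unknown-NMI fix. The following is a minimal user-space sketch of the same bookkeeping, not kernel code: the nmi_count global stands in for irq_stat.__nmi_count, a plain struct stands in for the per-CPU state, and both helper names are hypothetical.

	/*
	 * Illustrative sketch: state that lets a PMU NMI handler swallow one
	 * possibly-empty back-to-back NMI after an NMI in which more than one
	 * counter overflowed.
	 */
	#include <stdio.h>

	struct pmu_nmi_state {
		unsigned int marked;   /* nmi_count value whose NMI we may swallow */
		int          handled;  /* counters handled by the previous PMU NMI */
	};

	static struct pmu_nmi_state pmu_nmi;   /* per-CPU in the kernel */
	static unsigned int nmi_count;         /* irq_stat.__nmi_count stand-in */

	/* Called for an NMI no other handler claimed (DIE_NMIUNKNOWN). */
	static int pmu_swallow_unknown_nmi(void)
	{
		/* drop it silently instead of reporting an 'unknown nmi' */
		return nmi_count == pmu_nmi.marked;
	}

	/* Called after the PMU handler serviced 'handled' overflowed counters. */
	static void pmu_mark_next_nmi(int handled)
	{
		if (handled > 1 ||
		    (pmu_nmi.marked == nmi_count && pmu_nmi.handled > 1)) {
			/* the next NMI may be the empty half of a back-to-back pair */
			pmu_nmi.marked  = nmi_count + 1;
			pmu_nmi.handled = handled;
		}
	}

	int main(void)
	{
		nmi_count = 1;            /* first NMI: two counters overflow */
		pmu_mark_next_nmi(2);
		nmi_count = 2;            /* second NMI arrives already serviced */
		printf("swallow empty nmi: %s\n",
		       pmu_swallow_unknown_nmi() ? "yes" : "no");   /* yes */
		return 0;
	}

The point is that the handler only swallows an unknown NMI whose sequence number it explicitly marked, so genuine unknown NMIs still reach the kernel's default handling.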
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -712,7 +712,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	struct perf_sample_data data;
 	struct cpu_hw_events *cpuc;
 	int bit, loops;
-	u64 ack, status;
+	u64 status;
+	int handled = 0;
 
 	perf_sample_data_init(&data, 0);
 
@@ -728,6 +729,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 
 	loops = 0;
 again:
+	intel_pmu_ack_status(status);
 	if (++loops > 100) {
 		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
 		perf_event_print_debug();
@@ -736,19 +738,22 @@ again:
 	}
 
 	inc_irq_stat(apic_perf_irqs);
-	ack = status;
 
 	intel_pmu_lbr_read();
 
 	/*
 	 * PEBS overflow sets bit 62 in the global status register
 	 */
-	if (__test_and_clear_bit(62, (unsigned long *)&status))
+	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+		handled++;
 		x86_pmu.drain_pebs(regs);
+	}
 
 	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
 		struct perf_event *event = cpuc->events[bit];
 
+		handled++;
+
 		if (!test_bit(bit, cpuc->active_mask))
 			continue;
 
@@ -761,8 +766,6 @@ again:
 		x86_pmu_stop(event);
 	}
 
-	intel_pmu_ack_status(ack);
-
 	/*
 	 * Repeat if there is more work to be done:
 	 */
@@ -772,7 +775,7 @@ again:
 
 done:
 	intel_pmu_enable_all(0);
-	return 1;
+	return handled;
 }
 
 static struct event_constraint *
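Two of the shortlog fixes live in this hunk. Moving intel_pmu_ack_status(status) to the top of the loop (and dropping the stale 'ack' copy) means only status bits that were actually observed get acked, which is the "accidentally ack'ing a second event" fix. And instead of returning a constant 1, the handler now counts each serviced counter plus a PEBS drain, so perf_event_nmi_handler() can see handled > 1 and arm the back-to-back suppression. A minimal sketch of that return-value shape, with a hypothetical service() function in place of the real overflow processing:

	/* Sketch only: count every status bit serviced and return the count
	 * rather than a boolean. */
	#include <stdint.h>
	#include <stdio.h>

	#define IDX_MAX 64

	/* hypothetical stand-in for the real per-counter overflow handling */
	static void service(int bit) { printf("serviced counter %d\n", bit); }

	static int handle_irq_sketch(uint64_t status)
	{
		int bit, handled = 0;

		if (status & (1ULL << 62)) {	/* PEBS overflow bit */
			status &= ~(1ULL << 62);
			handled++;
		}
		for (bit = 0; bit < IDX_MAX; bit++) {
			if (!(status & (1ULL << bit)))
				continue;
			handled++;	/* counted even if the event is later skipped */
			service(bit);
		}
		return handled;		/* was: return 1 */
	}

	int main(void)
	{
		/* counters 0 and 3 plus a PEBS overflow: handled == 3, not 1 */
		printf("handled = %d\n", handle_irq_sketch((1ULL << 62) | 0x9));
		return 0;
	}

The p4 hunk below makes the matching change, returning the raw count instead of collapsing it to handled > 0.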
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -692,7 +692,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 		inc_irq_stat(apic_perf_irqs);
 	}
 
-	return handled > 0;
+	return handled;
 }
 
 /*
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -568,8 +568,13 @@ static int __init init_sysfs(void)
 	int error;
 
 	error = sysdev_class_register(&oprofile_sysclass);
-	if (!error)
-		error = sysdev_register(&device_oprofile);
+	if (error)
+		return error;
+
+	error = sysdev_register(&device_oprofile);
+	if (error)
+		sysdev_class_unregister(&oprofile_sysclass);
+
 	return error;
 }
 
@@ -580,8 +585,10 @@ static void exit_sysfs(void)
 }
 
 #else
-#define init_sysfs() do { } while (0)
-#define exit_sysfs() do { } while (0)
+
+static inline int init_sysfs(void) { return 0; }
+static inline void exit_sysfs(void) { }
+
 #endif /* CONFIG_PM */
 
 static int __init p4_init(char **cpu_type)
@@ -695,6 +702,8 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 	char *cpu_type = NULL;
 	int ret = 0;
 
+	using_nmi = 0;
+
 	if (!cpu_has_apic)
 		return -ENODEV;
 
@@ -774,7 +783,10 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 
 	mux_init(ops);
 
-	init_sysfs();
+	ret = init_sysfs();
+	if (ret)
+		return ret;
+
 	using_nmi = 1;
 	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
 	return 0;
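The init_sysfs() rework follows the usual register-then-unwind error pattern: each registration either succeeds or undoes whatever already succeeded before the error is propagated. A compilable sketch of that pattern, with hypothetical stubs in place of the sysdev_* calls from the diff:

	#include <stdio.h>

	/* hypothetical stand-ins for sysdev_class_register()/sysdev_register() */
	static int register_class(void)    { return 0; }
	static int register_device(void)   { return -1; }  /* simulate failure */
	static void unregister_class(void) { puts("class unregistered"); }

	static int init_sysfs_sketch(void)
	{
		int error = register_class();

		if (error)
			return error;	/* nothing registered yet, nothing to undo */

		error = register_device();
		if (error)
			unregister_class();	/* unwind the earlier registration */

		return error;
	}

	int main(void)
	{
		printf("init_sysfs_sketch() = %d\n", init_sysfs_sketch());
		return 0;
	}

The CONFIG_PM-disabled stubs likewise change from statement macros to static inline functions so that 'ret = init_sysfs()' in op_nmi_init() still compiles and returns 0 when sysfs support is configured out.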