perf_counter, x86: rework pmc_amd_save_disable_all() and pmc_amd_restore_all()

MSR reads and writes are expensive. This patch adds checks to avoid them
where possible: counters that are not marked in cpuc->active_mask are
skipped entirely, and the enable bit is only rewritten when it actually
needs to change.
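
For illustration, the pattern boils down to consulting per-counter software
state before touching the hardware, and bailing out early when the enable bit
already has the desired value. The standalone sketch below mimics that idea in
plain C; msr_read()/msr_write(), fake_msr[] and active_mask[] are stand-ins
invented for the example, not the kernel's rdmsrl()/wrmsrl() or the
cpu_hw_counters state.

/*
 * Standalone sketch of the pattern (not kernel code): consult software
 * state first, only then read the (here simulated) MSR, and skip the
 * write when the enable bit is already in the desired state.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_COUNTERS	4
#define EVNTSEL_ENABLE	(1ULL << 22)	/* enable bit, as in the EVNTSEL MSRs */

static uint64_t fake_msr[NR_COUNTERS];		/* stand-in for the real MSRs */
static bool active_mask[NR_COUNTERS];		/* software view of used counters */

static uint64_t msr_read(int idx)		{ return fake_msr[idx]; }
static void msr_write(int idx, uint64_t val)	{ fake_msr[idx] = val; }

static void disable_all(void)
{
	int idx;

	for (idx = 0; idx < NR_COUNTERS; idx++) {
		uint64_t val;

		if (!active_mask[idx])		/* unused counter: no MSR access at all */
			continue;
		val = msr_read(idx);
		if (!(val & EVNTSEL_ENABLE))	/* already disabled: skip the write */
			continue;
		msr_write(idx, val & ~EVNTSEL_ENABLE);
	}
}

int main(void)
{
	active_mask[1] = true;
	fake_msr[1] = EVNTSEL_ENABLE;
	disable_all();
	printf("counter 1 enable bit now: %llu\n",
	       (unsigned long long)(fake_msr[1] & EVNTSEL_ENABLE));
	return 0;
}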

[ Impact: micro-optimization on AMD CPUs ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-5-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 4295ee6266 (parent 4138960a92)
Author:    Robert Richter
Date:      2009-04-29 12:47:01 +02:00
Committer: Ingo Molnar

@@ -334,12 +334,14 @@ static u64 pmc_amd_save_disable_all(void)
 	for (idx = 0; idx < nr_counters_generic; idx++) {
 		u64 val;
 
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
 		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) {
-			val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
-			wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		}
+		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
+			continue;
+		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
 
 	return enabled;
 }
@@ -372,15 +374,17 @@ static void pmc_amd_restore_all(u64 ctrl)
 		return;
 
 	for (idx = 0; idx < nr_counters_generic; idx++) {
-		if (test_bit(idx, cpuc->active_mask)) {
-			u64 val;
+		u64 val;
 
-			rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-			val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-			wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		}
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
+		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+			continue;
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
 }
 
 void hw_perf_restore(u64 ctrl)
 {