x86, perf: Clean up perf_event cpu code

The CPU support for perf events on x86 was implemented via included C files
with #ifdefs.  Clean this up by creating a new header file and compiling
the vendor-specific files as needed.

Signed-off-by: Kevin Winchester <kjwinchester@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1314747665-2090-1-git-send-email-kjwinchester@gmail.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
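For context, the arrangement being removed worked by textual inclusion: the core perf_event.c #included each vendor .c file directly, and every vendor file wrapped its body in #ifdef CONFIG_CPU_SUP_* with stub fallbacks in the #else branch (visible in the final hunk below). A minimal sketch of that pattern, with the include list abridged and details assumed rather than copied from the tree:

/* Old pattern (sketch): one large translation unit built by textual #include. */

/* perf_event.c -- generic x86 PMU code, then the vendor files: */
#include "perf_event_amd.c"
#include "perf_event_intel.c"

/* perf_event_intel.c -- guards its entire body: */
#ifdef CONFIG_CPU_SUP_INTEL
/* ...implementation, free to share statics with perf_event.c... */
#else /* CONFIG_CPU_SUP_INTEL */
static int intel_pmu_init(void)
{
	return 0;	/* stub used when Intel support is configured out */
}
#endif

After this change each vendor file is compiled as its own object (built only when the matching CONFIG_CPU_SUP_* option is set), and the declarations the files share move into the new perf_event.h.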

@@ -1,16 +1,19 @@
-#ifdef CONFIG_CPU_SUP_INTEL
-
 /*
  * Per core/cpu state
  *
  * Used to coordinate shared registers between HT threads or
  * among events on a single PMU.
  */
-struct intel_shared_regs {
-	struct er_account	regs[EXTRA_REG_MAX];
-	int			refcnt;		/* per-core: #HT threads */
-	unsigned		core_id;	/* per-core: core id */
-};
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
+#include <asm/hardirq.h>
+#include <asm/apic.h>
+
+#include "perf_event.h"
 
 /*
  * Intel PerfMon, used on Core and later.
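(The intel_shared_regs definition and its comment move verbatim into the new perf_event.h, and this file, now a standalone translation unit, has to pull in its own headers instead of inheriting them from perf_event.c; a sketch of the resulting header follows the final hunk.)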
@@ -945,7 +948,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
  * Save and restart an expired event. Called by NMI contexts,
  * so it has to be careful about preempting normal event ops:
  */
-static int intel_pmu_save_and_restart(struct perf_event *event)
+int intel_pmu_save_and_restart(struct perf_event *event)
 {
 	x86_perf_event_update(event);
 	return x86_perf_event_set_period(event);
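(Dropping static here, and on allocate_shared_regs and intel_pmu_init below, is the mechanical heart of the change: these symbols are used by code that previously shared the translation unit, the PEBS handling for instance, so once the files are compiled separately they need external linkage and a declaration in the shared header.)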
@@ -1197,6 +1200,21 @@ intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
 	return c;
 }
 
+struct event_constraint *
+x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+	struct event_constraint *c;
+
+	if (x86_pmu.event_constraints) {
+		for_each_event_constraint(c, x86_pmu.event_constraints) {
+			if ((event->hw.config & c->cmask) == c->code)
+				return c;
+		}
+	}
+
+	return &unconstrained;
+}
+
 static struct event_constraint *
 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 {
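x86_get_event_constraints is the generic table-lookup fallback, presumably relocated here from the formerly amalgamated core file. Its loop leans on a convention worth spelling out: constraint tables are terminated by a zero-weight sentinel. A simplified sketch of that convention (the kernel builds these tables with EVENT_CONSTRAINT() macros and a wider struct; the field layout here is illustrative only):

struct event_constraint {
	u64 code;	/* event code this constraint applies to */
	u64 cmask;	/* which bits of hw.config take part in the match */
	int weight;	/* number of usable counters; 0 terminates the table */
	/* the real struct also carries a bitmap of permitted counters */
};

/* Walk a constraint table until the zero-weight terminator. */
#define for_each_event_constraint(c, constraints)	\
	for ((c) = (constraints); (c)->weight; (c)++)

When no entry matches, the function falls back to &unconstrained, a catch-all constraint that permits any counter.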
@@ -1309,7 +1327,7 @@ static __initconst const struct x86_pmu core_pmu = {
 	.event_constraints	= intel_core_event_constraints,
 };
 
-static struct intel_shared_regs *allocate_shared_regs(int cpu)
+struct intel_shared_regs *allocate_shared_regs(int cpu)
 {
 	struct intel_shared_regs *regs;
 	int i;
@@ -1441,7 +1459,7 @@ static void intel_clovertown_quirks(void)
 	x86_pmu.pebs_constraints = NULL;
 }
 
-static __init int intel_pmu_init(void)
+__init int intel_pmu_init(void)
 {
 	union cpuid10_edx edx;
 	union cpuid10_eax eax;
@@ -1597,7 +1615,7 @@ static __init int intel_pmu_init(void)
 		intel_pmu_lbr_init_nhm();
 
 		x86_pmu.event_constraints = intel_snb_event_constraints;
-		x86_pmu.pebs_constraints = intel_snb_pebs_events;
+		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
 		x86_pmu.extra_regs = intel_snb_extra_regs;
 		/* all extra regs are per-cpu when HT is on */
 		x86_pmu.er_flags |= ERF_HAS_RSP_1;
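(The rename from intel_snb_pebs_events to intel_snb_pebs_event_constraints is folded into the cleanup, bringing the Sandy Bridge PEBS table in line with the *_event_constraints naming of the other tables now that the symbol is shared through the header.)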
@@ -1628,16 +1646,3 @@ static __init int intel_pmu_init(void)
 	}
 	return 0;
 }
-
-#else /* CONFIG_CPU_SUP_INTEL */
-
-static int intel_pmu_init(void)
-{
-	return 0;
-}
-
-static struct intel_shared_regs *allocate_shared_regs(int cpu)
-{
-	return NULL;
-}
-#endif /* CONFIG_CPU_SUP_INTEL */
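With the #else branch deleted from the .c file, the no-Intel fallbacks must live somewhere every configuration can see. A sketch of how the new perf_event.h presumably arranges this, reusing the stubs removed above (the names match this diff; the exact header contents are assumed):

/* perf_event.h (sketch) */
#ifdef CONFIG_CPU_SUP_INTEL

int intel_pmu_save_and_restart(struct perf_event *event);
struct intel_shared_regs *allocate_shared_regs(int cpu);
int intel_pmu_init(void);

#else /* CONFIG_CPU_SUP_INTEL */

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	return NULL;
}

#endif /* CONFIG_CPU_SUP_INTEL */

The stubs become static inline so a header can define them without multiple-definition errors, and callers in the generic code compile unchanged whether or not Intel support is built in.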