perf: Per PMU disable

Changes perf_disable() into perf_pmu_disable() (and, symmetrically, perf_enable() into perf_pmu_enable()), so the disable state is tracked per struct pmu instead of globally.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Author:     Peter Zijlstra <a.p.zijlstra@chello.nl>
AuthorDate: 2010-06-14 08:49:00 +02:00
Committer:  Ingo Molnar <mingo@elte.hu>
Commit:     33696fc0d1
Parent:     24cd7f54a0

9 changed files with 119 additions and 99 deletions
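For reference, the core side of this change (kernel/perf_event.c, not shown in the hunks below) pairs the new pmu_enable()/pmu_disable() callbacks with a per-cpu nesting count, so the hardware is only toggled on the outermost call. A minimal sketch, assuming the per-cpu pmu_disable_count field this patch adds to struct pmu:

void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);

	/* Only call into the hardware on the 0 -> 1 transition. */
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);

	/* Re-enable only when the last disabler is done. */
	if (!--(*count))
		pmu->pmu_enable(pmu);
}

Replacing the global hw_perf_disable()/hw_perf_enable() hooks with these per-PMU helpers means each arch driver exposes its disable logic through struct pmu, which is what the renames below implement.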

arch/powerpc/kernel/perf_event.c

@@ -517,7 +517,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
  * Disable all events to prevent PMU interrupts and to allow
  * events to be added or removed.
  */
-void hw_perf_disable(void)
+static void power_pmu_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
@@ -565,7 +565,7 @@ void hw_perf_disable(void)
  * If we were previously disabled and events were added, then
  * put the new config on the PMU.
  */
-void hw_perf_enable(void)
+static void power_pmu_pmu_enable(struct pmu *pmu)
 {
 	struct perf_event *event;
 	struct cpu_hw_events *cpuhw;
@@ -735,7 +735,7 @@ static int power_pmu_enable(struct perf_event *event)
 	int ret = -EAGAIN;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	/*
 	 * Add the event to the list (if there is room)
@@ -769,7 +769,7 @@ nocheck:
 	ret = 0;
  out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 
 	return ret;
 }
@@ -784,7 +784,7 @@ static void power_pmu_disable(struct perf_event *event)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 
 	power_pmu_read(event);
@@ -821,7 +821,7 @@ static void power_pmu_disable(struct perf_event *event)
 		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
 	}
 
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
@@ -837,7 +837,7 @@ static void power_pmu_unthrottle(struct perf_event *event)
 	if (!event->hw.idx || !event->hw.sample_period)
 		return;
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	power_pmu_read(event);
 	left = event->hw.sample_period;
 	event->hw.last_period = left;
@@ -848,7 +848,7 @@ static void power_pmu_unthrottle(struct perf_event *event)
 	local64_set(&event->hw.prev_count, val);
 	local64_set(&event->hw.period_left, left);
 	perf_event_update_userpage(event);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
@@ -861,7 +861,7 @@ void power_pmu_start_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	perf_disable();
+	perf_pmu_disable(pmu);
 	cpuhw->group_flag |= PERF_EVENT_TXN;
 	cpuhw->n_txn_start = cpuhw->n_events;
 }
@@ -876,7 +876,7 @@ void power_pmu_cancel_txn(struct pmu *pmu)
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
 	cpuhw->group_flag &= ~PERF_EVENT_TXN;
-	perf_enable();
+	perf_pmu_enable(pmu);
 }
 
 /*
@@ -903,7 +903,7 @@ int power_pmu_commit_txn(struct pmu *pmu)
 		cpuhw->event[i]->hw.config = cpuhw->events[i];
 
 	cpuhw->group_flag &= ~PERF_EVENT_TXN;
-	perf_enable();
+	perf_pmu_enable(pmu);
 	return 0;
 }
@@ -1131,6 +1131,8 @@ static int power_pmu_event_init(struct perf_event *event)
 }
 
 struct pmu power_pmu = {
+	.pmu_enable	= power_pmu_pmu_enable,
+	.pmu_disable	= power_pmu_pmu_disable,
 	.event_init	= power_pmu_event_init,
 	.enable		= power_pmu_enable,
 	.disable	= power_pmu_disable,

arch/powerpc/kernel/perf_event_fsl_emb.c

@@ -177,7 +177,7 @@ static void fsl_emb_pmu_read(struct perf_event *event)
  * Disable all events to prevent PMU interrupts and to allow
  * events to be added or removed.
  */
-void hw_perf_disable(void)
+static void fsl_emb_pmu_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
@@ -216,7 +216,7 @@ void hw_perf_disable(void)
  * If we were previously disabled and events were added, then
  * put the new config on the PMU.
  */
-void hw_perf_enable(void)
+static void fsl_emb_pmu_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
@@ -271,7 +271,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
 	u64 val;
 	int i;
 
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	cpuhw = &get_cpu_var(cpu_hw_events);
 
 	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
@@ -311,7 +311,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
 	ret = 0;
  out:
 	put_cpu_var(cpu_hw_events);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	return ret;
 }
@@ -321,7 +321,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
 	struct cpu_hw_events *cpuhw;
 	int i = event->hw.idx;
 
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	if (i < 0)
 		goto out;
@@ -349,7 +349,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
 	cpuhw->n_events--;
 
  out:
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	put_cpu_var(cpu_hw_events);
 }
@@ -367,7 +367,7 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event)
 	if (event->hw.idx < 0 || !event->hw.sample_period)
 		return;
 	local_irq_save(flags);
-	perf_disable();
+	perf_pmu_disable(event->pmu);
 	fsl_emb_pmu_read(event);
 	left = event->hw.sample_period;
 	event->hw.last_period = left;
@@ -378,7 +378,7 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event)
 	local64_set(&event->hw.prev_count, val);
 	local64_set(&event->hw.period_left, left);
 	perf_event_update_userpage(event);
-	perf_enable();
+	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
@@ -524,6 +524,8 @@ static int fsl_emb_pmu_event_init(struct perf_event *event)
 }
 
 static struct pmu fsl_emb_pmu = {
+	.pmu_enable	= fsl_emb_pmu_pmu_enable,
+	.pmu_disable	= fsl_emb_pmu_pmu_disable,
 	.event_init	= fsl_emb_pmu_event_init,
 	.enable		= fsl_emb_pmu_enable,
 	.disable	= fsl_emb_pmu_disable,
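Because the helpers count rather than toggle, the calls nest safely: power_pmu_start_txn() disables the PMU, and the power_pmu_enable() path it brackets disables it again via event->pmu. A hypothetical nesting trace, assuming the counting sketch above:

	perf_pmu_disable(pmu);	/* count 0 -> 1: pmu->pmu_disable() runs  */
	perf_pmu_disable(pmu);	/* count 1 -> 2: already off, no callback */
	/* ... add/remove events ... */
	perf_pmu_enable(pmu);	/* count 2 -> 1: stays off                */
	perf_pmu_enable(pmu);	/* count 1 -> 0: pmu->pmu_enable() runs   */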