Merge branch 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (30 commits)
  gameport: use this_cpu_read instead of lookup
  x86: udelay: Use this_cpu_read to avoid address calculation
  x86: Use this_cpu_inc_return for nmi counter
  x86: Replace uses of current_cpu_data with this_cpu ops
  x86: Use this_cpu_ops to optimize code
  vmstat: User per cpu atomics to avoid interrupt disable / enable
  irq_work: Use per cpu atomics instead of regular atomics
  cpuops: Use cmpxchg for xchg to avoid lock semantics
  x86: this_cpu_cmpxchg and this_cpu_xchg operations
  percpu: Generic this_cpu_cmpxchg() and this_cpu_xchg support
  percpu,x86: relocate this_cpu_add_return() and friends
  connector: Use this_cpu operations
  xen: Use this_cpu_inc_return
  taskstats: Use this_cpu_ops
  random: Use this_cpu_inc_return
  fs: Use this_cpu_inc_return in buffer.c
  highmem: Use this_cpu_xx_return() operations
  vmstat: Use this_cpu_inc_return for vm statistics
  x86: Support for this_cpu_add, sub, dec, inc_return
  percpu: Generic support for this_cpu_add, sub, dec, inc_return
  ...

Fixed up conflicts in arch/x86/kernel/{apic/nmi.c, apic/x2apic_uv_x.c, process.c} as per Tejun.
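The pattern repeated across these commits replaces a two-step per-cpu access (compute this CPU's address via __get_cpu_var(), then dereference it) with a single this_cpu_*() operation, which x86 lowers to one segment-prefixed instruction. A minimal sketch of the conversion, assuming a made-up per-cpu counter: DEFINE_PER_CPU and the this_cpu_*() accessors are the real <linux/percpu.h> interfaces, while example_count and the bump_*() helpers are purely illustrative.

#include <linux/percpu.h>

/* Illustrative per-cpu counter; the name is hypothetical. */
static DEFINE_PER_CPU(unsigned int, example_count);

/*
 * Old style: an address calculation followed by a separate
 * read-modify-write. Safe only with preemption disabled, since the
 * task could otherwise migrate between the lookup and the store.
 */
static unsigned int bump_old(void)
{
	return ++__get_cpu_var(example_count);
}

/*
 * New style: one per-cpu operation, emitted as a single
 * segment-prefixed instruction on x86.
 */
static unsigned int bump_new(void)
{
	return this_cpu_inc_return(example_count);
}

Besides eliminating the address calculation, the single-instruction this_cpu_*() form cannot be split by preemption-driven CPU migration mid-operation.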
@@ -997,8 +997,7 @@ x86_perf_event_set_period(struct perf_event *event)
 
 static void x86_pmu_enable_event(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	if (cpuc->enabled)
+	if (__this_cpu_read(cpu_hw_events.enabled))
 		__x86_pmu_enable_event(&event->hw,
 				       ARCH_PERFMON_EVENTSEL_ENABLE);
 }
@@ -1272,7 +1271,7 @@ perf_event_nmi_handler(struct notifier_block *self,
 		break;
 	case DIE_NMIUNKNOWN:
 		this_nmi = percpu_read(irq_stat.__nmi_count);
-		if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+		if (this_nmi != __this_cpu_read(pmu_nmi.marked))
 			/* let the kernel handle the unknown nmi */
 			return NOTIFY_DONE;
 		/*
@@ -1296,8 +1295,8 @@ perf_event_nmi_handler(struct notifier_block *self,
 	this_nmi = percpu_read(irq_stat.__nmi_count);
 	if ((handled > 1) ||
 		/* the next nmi could be a back-to-back nmi */
-	    ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
-	     (__get_cpu_var(pmu_nmi).handled > 1))) {
+	    ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
+	     (__this_cpu_read(pmu_nmi.handled) > 1))) {
 		/*
 		 * We could have two subsequent back-to-back nmis: The
 		 * first handles more than one counter, the 2nd
@@ -1308,8 +1307,8 @@ perf_event_nmi_handler(struct notifier_block *self,
 		 * handling more than one counter. We will mark the
 		 * next (3rd) and then drop it if unhandled.
 		 */
-		__get_cpu_var(pmu_nmi).marked = this_nmi + 1;
-		__get_cpu_var(pmu_nmi).handled = handled;
+		__this_cpu_write(pmu_nmi.marked, this_nmi + 1);
+		__this_cpu_write(pmu_nmi.handled, handled);
 	}
 
 	return NOTIFY_STOP;
@@ -1484,11 +1483,9 @@ static inline void x86_pmu_read(struct perf_event *event)
  */
 static void x86_pmu_start_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
 	perf_pmu_disable(pmu);
-	cpuc->group_flag |= PERF_EVENT_TXN;
-	cpuc->n_txn = 0;
+	__this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
+	__this_cpu_write(cpu_hw_events.n_txn, 0);
 }
 
 /*
@@ -1498,14 +1495,12 @@ static void x86_pmu_start_txn(struct pmu *pmu)
  */
 static void x86_pmu_cancel_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
-	cpuc->group_flag &= ~PERF_EVENT_TXN;
+	__this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
 	/*
 	 * Truncate the collected events.
	 */
-	cpuc->n_added -= cpuc->n_txn;
-	cpuc->n_events -= cpuc->n_txn;
+	__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
+	__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
 	perf_pmu_enable(pmu);
 }
 
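The cmpxchg/xchg operations named in the shortlog follow the same per-cpu shape. A small sketch of this_cpu_cmpxchg(): the interface itself is what the series adds, while the per-cpu owner slot and try_claim() helper are made up for illustration.

#include <linux/types.h>
#include <linux/percpu.h>

/* Hypothetical per-cpu ownership slot, -1 meaning free. */
static DEFINE_PER_CPU(int, owner) = -1;

static bool try_claim(int id)
{
	/*
	 * this_cpu_cmpxchg(pcp, oval, nval) stores nval into this
	 * CPU's copy only if it currently holds oval, and returns
	 * the previous value. No lock prefix is needed because the
	 * variable is private to this CPU.
	 */
	return this_cpu_cmpxchg(owner, -1, id) == -1;
}

Per the "cpuops: Use cmpxchg for xchg to avoid lock semantics" commit, this_cpu_xchg() is likewise built on a cmpxchg loop rather than the xchg instruction, whose implied lock prefix is unnecessary for CPU-local data.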