Merge tag 'v3.3-rc7' into sched/core

Merge reason: merge back final fixes, prepare for the merge window.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Ingo Molnar committed 2012-03-13 16:26:52 +01:00
302 changed files with 1921 additions and 984 deletions

arch/arm/kernel/ecard.c

@@ -242,6 +242,7 @@ static void ecard_init_pgtables(struct mm_struct *mm)
 	memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
+	vma.vm_flags = VM_EXEC;
 	vma.vm_mm = mm;
 	flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);

arch/arm/kernel/perf_event.c

@@ -180,7 +180,7 @@ armpmu_event_set_period(struct perf_event *event,
 u64
 armpmu_event_update(struct perf_event *event,
 		    struct hw_perf_event *hwc,
-		    int idx, int overflow)
+		    int idx)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	u64 delta, prev_raw_count, new_raw_count;
@@ -193,13 +193,7 @@ again:
 			    new_raw_count) != prev_raw_count)
 		goto again;
-	new_raw_count &= armpmu->max_period;
-	prev_raw_count &= armpmu->max_period;
-	if (overflow)
-		delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
-	else
-		delta = new_raw_count - prev_raw_count;
+	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
 	local64_add(delta, &event->count);
 	local64_sub(delta, &hwc->period_left);
@@ -216,7 +210,7 @@ armpmu_read(struct perf_event *event)
 	if (hwc->idx < 0)
 		return;
-	armpmu_event_update(event, hwc, hwc->idx, 0);
+	armpmu_event_update(event, hwc, hwc->idx);
 }
 static void
@@ -232,7 +226,7 @@ armpmu_stop(struct perf_event *event, int flags)
 	if (!(hwc->state & PERF_HES_STOPPED)) {
 		armpmu->disable(hwc, hwc->idx);
 		barrier(); /* why? */
-		armpmu_event_update(event, hwc, hwc->idx, 0);
+		armpmu_event_update(event, hwc, hwc->idx);
 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	}
 }
@@ -518,7 +512,13 @@ __hw_perf_event_init(struct perf_event *event)
 	hwc->config_base |= (unsigned long)mapping;
 	if (!hwc->sample_period) {
-		hwc->sample_period = armpmu->max_period;
+		/*
+		 * For non-sampling runs, limit the sample_period to half
+		 * of the counter width. That way, the new counter value
+		 * is far less likely to overtake the previous one unless
+		 * you have some serious IRQ latency issues.
+		 */
+		hwc->sample_period = armpmu->max_period >> 1;
 		hwc->last_period = hwc->sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
 	}
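Aside, not part of the patch: the delta rewrite above relies on armpmu->max_period being an all-ones mask (0xffffffff for these 32-bit counters), so an unsigned subtraction followed by the mask gives the number of elapsed events even if the counter wrapped once since the last read. A minimal user-space sketch of that arithmetic, with MAX_PERIOD assumed here to be the 32-bit all-ones value:

#include <stdint.h>
#include <stdio.h>

/* Assumed here: a 32-bit counter whose max_period is the all-ones mask. */
#define MAX_PERIOD 0xffffffffULL

static uint64_t counter_delta(uint64_t prev_raw, uint64_t new_raw)
{
	/* Unsigned subtraction plus the mask gives the elapsed count
	 * modulo the counter width, covering a single wrap-around. */
	return (new_raw - prev_raw) & MAX_PERIOD;
}

int main(void)
{
	/* No wrap: 0x2000 - 0x1000 = 0x1000 events. */
	printf("%llx\n", (unsigned long long)counter_delta(0x1000, 0x2000));
	/* One wrap: 0xfffffff0 -> 0x10 still yields 0x20 events. */
	printf("%llx\n", (unsigned long long)counter_delta(0xfffffff0, 0x10));
	return 0;
}

Halving the default sample_period (max_period >> 1) in the hunk above keeps new_raw_count from lapping prev_raw_count more than once between updates, which is exactly the case the single mask covers.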
@@ -679,6 +679,28 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
 	armpmu->type = ARM_PMU_DEVICE_CPU;
 }
+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
+				    unsigned long action, void *hcpu)
+{
+	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+		return NOTIFY_DONE;
+	if (cpu_pmu && cpu_pmu->reset)
+		cpu_pmu->reset(NULL);
+	return NOTIFY_OK;
+}
+static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
+	.notifier_call = pmu_cpu_notify,
+};
 /*
  * CPU PMU identification and registration.
  */
@@ -730,6 +752,7 @@ init_hw_perf_events(void)
 		pr_info("enabled with %s PMU driver, %d counters available\n",
 			cpu_pmu->name, cpu_pmu->num_events);
 		cpu_pmu_init(cpu_pmu);
+		register_cpu_notifier(&pmu_cpu_notifier);
 		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
 	} else {
 		pr_info("no hardware support available\n");

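Aside, not part of the patch: pmu_cpu_notify() masks off CPU_TASKS_FROZEN before comparing against CPU_STARTING, so the PMU is reset both on an ordinary hotplug and on the frozen (suspend/resume) variant of the same notification. A small stand-alone sketch of that filter; the constants are redefined here purely for illustration, the real ones live in include/linux/cpu.h:

#include <stdio.h>

/* Illustrative constants only; see include/linux/cpu.h for the real ones. */
#define CPU_ONLINE		0x0002
#define CPU_STARTING		0x000A
#define CPU_TASKS_FROZEN	0x0010
#define CPU_STARTING_FROZEN	(CPU_STARTING | CPU_TASKS_FROZEN)

static int wants_pmu_reset(unsigned long action)
{
	/* Strip the "frozen" bit so suspend/resume takes the same path. */
	return (action & ~CPU_TASKS_FROZEN) == CPU_STARTING;
}

int main(void)
{
	printf("%d\n", wants_pmu_reset(CPU_STARTING));		/* 1 */
	printf("%d\n", wants_pmu_reset(CPU_STARTING_FROZEN));	/* 1 */
	printf("%d\n", wants_pmu_reset(CPU_ONLINE));		/* 0 */
	return 0;
}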
arch/arm/kernel/perf_event_v6.c

@@ -467,23 +467,6 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
-static int counter_is_active(unsigned long pmcr, int idx)
-{
-	unsigned long mask = 0;
-	if (idx == ARMV6_CYCLE_COUNTER)
-		mask = ARMV6_PMCR_CCOUNT_IEN;
-	else if (idx == ARMV6_COUNTER0)
-		mask = ARMV6_PMCR_COUNT0_IEN;
-	else if (idx == ARMV6_COUNTER1)
-		mask = ARMV6_PMCR_COUNT1_IEN;
-	if (mask)
-		return pmcr & mask;
-	WARN_ONCE(1, "invalid counter number (%d)\n", idx);
-	return 0;
-}
 static irqreturn_t
 armv6pmu_handle_irq(int irq_num,
 		    void *dev)
@@ -513,7 +496,8 @@ armv6pmu_handle_irq(int irq_num,
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
-		if (!counter_is_active(pmcr, idx))
+		/* Ignore if we don't have an event. */
+		if (!event)
 			continue;
 		/*
@@ -524,7 +508,7 @@ armv6pmu_handle_irq(int irq_num,
 			continue;
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;

arch/arm/kernel/perf_event_v7.c

@@ -809,6 +809,11 @@ static inline int armv7_pmnc_disable_intens(int idx)
 	counter = ARMV7_IDX_TO_COUNTER(idx);
 	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
 	isb();
+	/* Clear the overflow flag in case an interrupt is pending. */
+	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
+	isb();
 	return idx;
 }
@@ -955,6 +960,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
+		/* Ignore if we don't have an event. */
+		if (!event)
+			continue;
 		/*
 		 * We have a single interrupt for all counters. Check that
 		 * each counter has overflowed before we process it.
@@ -963,7 +972,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 			continue;
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;

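Aside, not part of the patch: the mcr added to armv7_pmnc_disable_intens() above writes the counter's bit to the ARMv7 overflow status register (c9, c12, 3, PMOVSR), which is write-one-to-clear, so an overflow latched while the interrupt was still enabled cannot be picked up by a later, unrelated PMU interrupt. Roughly, as a hypothetical stand-alone helper:

/*
 * Hypothetical helper mirroring the sequence added above (ARMv7 only):
 * clear one counter's latched overflow bit in PMOVSR (c9, c12, 3).
 */
static inline void armv7_clear_overflow_flag(unsigned int counter)
{
	/* PMOVSR is write-one-to-clear; untouched bits keep their state. */
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (1U << counter));
	/* Make sure the write has taken effect before continuing. */
	asm volatile("isb" : : : "memory");
}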
arch/arm/kernel/perf_event_xscale.c

@@ -255,11 +255,14 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
+		if (!event)
+			continue;
 		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
@@ -592,11 +595,14 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
-		if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
+		if (!event)
+			continue;
+		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
 			continue;
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
@@ -663,7 +669,7 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 static void
 xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
-	unsigned long flags, ien, evtsel;
+	unsigned long flags, ien, evtsel, of_flags;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 	ien = xscale2pmu_read_int_enable();
@@ -672,26 +678,31 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
 		ien &= ~XSCALE2_CCOUNT_INT_EN;
+		of_flags = XSCALE2_CCOUNT_OVERFLOW;
 		break;
 	case XSCALE_COUNTER0:
 		ien &= ~XSCALE2_COUNT0_INT_EN;
 		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+		of_flags = XSCALE2_COUNT0_OVERFLOW;
 		break;
 	case XSCALE_COUNTER1:
 		ien &= ~XSCALE2_COUNT1_INT_EN;
 		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+		of_flags = XSCALE2_COUNT1_OVERFLOW;
 		break;
 	case XSCALE_COUNTER2:
 		ien &= ~XSCALE2_COUNT2_INT_EN;
 		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+		of_flags = XSCALE2_COUNT2_OVERFLOW;
 		break;
 	case XSCALE_COUNTER3:
 		ien &= ~XSCALE2_COUNT3_INT_EN;
 		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+		of_flags = XSCALE2_COUNT3_OVERFLOW;
 		break;
 	default:
 		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
@@ -701,6 +712,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
+	xscale2pmu_write_overflow_flags(of_flags);
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
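Aside, not part of the patch: on xscale2 the overflow flag register latches overflows independently of the interrupt enable, so disabling a counter without clearing its flag leaves a stale bit for the next PMU interrupt to misattribute; the new of_flags write above drops it in the same locked section as the disable. A toy user-space model of the difference, with the flag register reduced to a plain bitmask:

#include <stdio.h>

/* Toy model of the xscale2 overflow-flag register (one bit per counter). */
static unsigned int of_flags_reg;

static void counter_overflows(int counter)   { of_flags_reg |= 1U << counter; }
static void disable_counter_old(int counter) { (void)counter; /* flags left alone */ }
static void disable_counter_new(int counter) { of_flags_reg &= ~(1U << counter); }

int main(void)
{
	counter_overflows(2);		/* overflow latched while the IRQ is masked */
	disable_counter_old(2);
	printf("stale flag after old disable: %u\n", (of_flags_reg >> 2) & 1);	/* 1 */

	disable_counter_new(2);
	printf("flag after fixed disable:     %u\n", (of_flags_reg >> 2) & 1);	/* 0 */
	return 0;
}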