Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Thomas Gleixner:
 "A slighlty large fix for a subtle issue in the CPU hotplug code of
  certain ARM SoCs, where the not yet online cpu needs to setup the cpu
  local timer and needs to set the interrupt affinity to itself.
  Setting interrupt affinity to a not online cpu is prohibited and
  therefor the timer interrupt ends up on the wrong cpu, which leads to
  nasty complications.

  The SoC folks tried to hack around that in the SoC code in some more
  than nasty ways.  The proper solution is to have a way to enforce
  the affinity setting for a not yet online cpu.  The core patch to
  the genirq code provides that facility and the follow-up patches
  make use of it in the GIC interrupt controller and the Exynos timer
  driver.

  The change to the core code has no implications for existing users,
  except for the rename of the locked function and the resulting
  fixup in mips/cavium.  Aside from that, no runtime impact is
  possible, as none of the existing interrupt chips implements
  anything which depends on the force argument of the
  irq_set_affinity() callback"

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  clocksource: Exynos_mct: Register clock event after request_irq()
  clocksource: Exynos_mct: Use irq_force_affinity() in cpu bringup
  irqchip: Gic: Support forced affinity setting
  genirq: Allow forcing cpu affinity of interrupts
commit d9e9e8e2fe
Author: Linus Torvalds
Date:   2014-04-27 11:21:03 -07:00

6 changed files with 52 additions and 25 deletions
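What the new facility looks like from the bringup path: a minimal sketch, not part of the commit itself, using only the two helpers this series adds to include/linux/interrupt.h. The function name my_local_timer_bringup and its irq parameter are illustrative.

#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

/*
 * Runs on the CPU being brought up, e.g. from a CPU_STARTING
 * notifier, before that CPU is marked online.
 */
static void my_local_timer_bringup(unsigned int irq)
{
        unsigned int cpu = smp_processor_id();

        /*
         * irq_set_affinity(irq, cpumask_of(cpu)) would fail here:
         * the target mask is checked against cpu_online_mask and
         * this CPU is not in it yet.
         */

        /* The forced variant skips the online check: */
        irq_force_affinity(irq, cpumask_of(cpu));
}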

arch/mips/cavium-octeon/octeon-irq.c

@@ -635,7 +635,7 @@ static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
                 cpumask_clear(&new_affinity);
                 cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
         }
-        __irq_set_affinity_locked(data, &new_affinity);
+        irq_set_affinity_locked(data, &new_affinity, false);
 }
 
 static int octeon_irq_ciu_set_affinity(struct irq_data *data,

drivers/clocksource/exynos_mct.c

@@ -416,8 +416,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
         evt->set_mode = exynos4_tick_set_mode;
         evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
         evt->rating = 450;
-        clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
-                                        0xf, 0x7fffffff);
 
         exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
@@ -430,9 +428,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
                                 evt->irq);
                         return -EIO;
                 }
+                irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu));
         } else {
                 enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
         }
+        clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
+                                        0xf, 0x7fffffff);
 
         return 0;
 }
@@ -450,7 +451,6 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
                         unsigned long action, void *hcpu)
 {
         struct mct_clock_event_device *mevt;
-        unsigned int cpu;
 
         /*
          * Grab cpu pointer in each case to avoid spurious
@@ -461,12 +461,6 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
                 mevt = this_cpu_ptr(&percpu_mct_tick);
                 exynos4_local_timer_setup(&mevt->evt);
                 break;
-        case CPU_ONLINE:
-                cpu = (unsigned long)hcpu;
-                if (mct_int_type == MCT_INT_SPI)
-                        irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
-                                         cpumask_of(cpu));
-                break;
         case CPU_DYING:
                 mevt = this_cpu_ptr(&percpu_mct_tick);
                 exynos4_local_timer_stop(&mevt->evt);
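The ordering these hunks establish matters: clockevent registration moves after request_irq() and the forced affinity setting, because clockevents_config_and_register() may arm the timer immediately, so the interrupt must already target the incoming CPU. And since the whole setup now runs on the new CPU out of CPU_STARTING, the old CPU_ONLINE re-affinity step is redundant and is deleted. A condensed sketch of the pattern, with hypothetical my_* names rather than the literal driver code:

#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>

/* Stand-in for the driver's tick handler (hypothetical). */
static irqreturn_t my_tick_isr(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int my_local_timer_setup(struct clock_event_device *evt,
                                unsigned int irq, unsigned int cpu,
                                u32 rate)
{
        if (request_irq(irq, my_tick_isr, IRQF_TIMER | IRQF_NOBALANCING,
                        "my_tick", evt))
                return -EIO;

        /* Bind the tick irq to the CPU being brought up ... */
        irq_force_affinity(irq, cpumask_of(cpu));

        /*
         * ... and only then make the clockevent live; the core may
         * program the first event as soon as this returns.
         */
        clockevents_config_and_register(evt, rate, 0xf, 0x7fffffff);
        return 0;
}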

drivers/irqchip/irq-gic.c

@@ -246,10 +246,14 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                             bool force)
 {
         void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
-        unsigned int shift = (gic_irq(d) % 4) * 8;
-        unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+        unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
         u32 val, mask, bit;
 
+        if (!force)
+                cpu = cpumask_any_and(mask_val, cpu_online_mask);
+        else
+                cpu = cpumask_first(mask_val);
+
         if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
                 return -EINVAL;
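On the driver side, force only changes how the target CPU is picked: the caller vouches that the first CPU in the mask is a valid target even though it is not online yet. A sketch of the same contract in a generic chip callback; my_chip_set_affinity is hypothetical, not an existing driver:

#include <linux/irq.h>
#include <linux/cpumask.h>

static int my_chip_set_affinity(struct irq_data *d,
                                const struct cpumask *mask_val, bool force)
{
        unsigned int cpu;

        if (!force)
                /* Normal path: any online CPU from the mask. */
                cpu = cpumask_any_and(mask_val, cpu_online_mask);
        else
                /* Hotplug bringup: trust the caller, take the first CPU. */
                cpu = cpumask_first(mask_val);

        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        /* ... route the interrupt to @cpu in hardware here ... */
        return IRQ_SET_MASK_OK;
}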

include/linux/interrupt.h

@@ -203,7 +203,40 @@ static inline int check_wakeup_irqs(void) { return 0; }
 extern cpumask_var_t irq_default_affinity;
 
-extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
+/* Internal implementation. Use the helpers below */
+extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
+                              bool force);
+
+/**
+ * irq_set_affinity - Set the irq affinity of a given irq
+ * @irq:        Interrupt to set affinity
+ * @mask:       cpumask
+ *
+ * Fails if cpumask does not contain an online CPU
+ */
+static inline int
+irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+        return __irq_set_affinity(irq, cpumask, false);
+}
+
+/**
+ * irq_force_affinity - Force the irq affinity of a given irq
+ * @irq:        Interrupt to set affinity
+ * @mask:       cpumask
+ *
+ * Same as irq_set_affinity, but without checking the mask against
+ * online cpus.
+ *
+ * Solely for low level cpu hotplug code, where we need to make per
+ * cpu interrupts affine before the cpu becomes online.
+ */
+static inline int
+irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+        return __irq_set_affinity(irq, cpumask, true);
+}
+
 extern int irq_can_set_affinity(unsigned int irq);
 extern int irq_select_affinity(unsigned int irq);
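The caller-visible split, summarized in a sketch; irq and cpu are placeholders and the my_pin_* wrappers are hypothetical. Both helpers funnel into __irq_set_affinity() and differ only in the force flag:

#include <linux/interrupt.h>
#include <linux/cpumask.h>

static int my_pin_irq(unsigned int irq, unsigned int cpu)
{
        /* Normal path: fails if @cpu is not online. */
        return irq_set_affinity(irq, cpumask_of(cpu));
}

static int my_pin_irq_bringup(unsigned int irq, unsigned int cpu)
{
        /* Hotplug bringup only: @cpu may not be online yet. */
        return irq_force_affinity(irq, cpumask_of(cpu));
}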

include/linux/irq.h

@@ -394,7 +394,8 @@ extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
 extern void irq_cpu_online(void);
 extern void irq_cpu_offline(void);
-extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask);
+extern int irq_set_affinity_locked(struct irq_data *data,
+                                   const struct cpumask *cpumask, bool force);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
 void irq_move_irq(struct irq_data *data);

kernel/irq/manage.c

@@ -180,7 +180,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
         struct irq_chip *chip = irq_data_get_irq_chip(data);
         int ret;
 
-        ret = chip->irq_set_affinity(data, mask, false);
+        ret = chip->irq_set_affinity(data, mask, force);
         switch (ret) {
         case IRQ_SET_MASK_OK:
                 cpumask_copy(data->affinity, mask);
@@ -192,7 +192,8 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
         return ret;
 }
 
-int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
+int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
+                            bool force)
 {
         struct irq_chip *chip = irq_data_get_irq_chip(data);
         struct irq_desc *desc = irq_data_to_desc(data);
@@ -202,7 +203,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
                 return -EINVAL;
 
         if (irq_can_move_pcntxt(data)) {
-                ret = irq_do_set_affinity(data, mask, false);
+                ret = irq_do_set_affinity(data, mask, force);
         } else {
                 irqd_set_move_pending(data);
                 irq_copy_pending(desc, mask);
@@ -217,13 +218,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
         return ret;
 }
 
-/**
- *      irq_set_affinity - Set the irq affinity of a given irq
- *      @irq:           Interrupt to set affinity
- *      @mask:          cpumask
- *
- */
-int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
+int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
 {
         struct irq_desc *desc = irq_to_desc(irq);
         unsigned long flags;
@@ -233,7 +228,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
                 return -EINVAL;
 
         raw_spin_lock_irqsave(&desc->lock, flags);
-        ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
+        ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
         raw_spin_unlock_irqrestore(&desc->lock, flags);
         return ret;
 }
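Putting the manage.c changes together, a sketch of the resulting call chain on the direct path, taken when the irq can be moved from process context; the pending-move path instead defers the actual update:

irq_set_affinity(irq, mask)          /* force = false */
irq_force_affinity(irq, mask)        /* force = true  */
        -> __irq_set_affinity(irq, mask, force)
           -> irq_set_affinity_locked(irq_data, mask, force)
              -> irq_do_set_affinity(irq_data, mask, force)
                 -> chip->irq_set_affinity(irq_data, mask, force)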