Merge branch 'fixes-2.6.39' into for-2.6.40
@@ -31,19 +31,19 @@
 static int i8259A_auto_eoi = -1;
 DEFINE_RAW_SPINLOCK(i8259A_lock);
-static void disable_8259A_irq(unsigned int irq);
-static void enable_8259A_irq(unsigned int irq);
-static void mask_and_ack_8259A(unsigned int irq);
+static void disable_8259A_irq(struct irq_data *d);
+static void enable_8259A_irq(struct irq_data *d);
+static void mask_and_ack_8259A(struct irq_data *d);
 static void init_8259A(int auto_eoi);
 
 static struct irq_chip i8259A_chip = {
-	.name = "XT-PIC",
-	.mask = disable_8259A_irq,
-	.disable = disable_8259A_irq,
-	.unmask = enable_8259A_irq,
-	.mask_ack = mask_and_ack_8259A,
+	.name = "XT-PIC",
+	.irq_mask = disable_8259A_irq,
+	.irq_disable = disable_8259A_irq,
+	.irq_unmask = enable_8259A_irq,
+	.irq_mask_ack = mask_and_ack_8259A,
 #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
-	.set_affinity = plat_set_irq_affinity,
+	.irq_set_affinity = plat_set_irq_affinity,
 #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 };
 
@@ -59,12 +59,11 @@ static unsigned int cached_irq_mask = 0xffff;
 #define cached_master_mask (cached_irq_mask)
 #define cached_slave_mask (cached_irq_mask >> 8)
 
-static void disable_8259A_irq(unsigned int irq)
+static void disable_8259A_irq(struct irq_data *d)
 {
-	unsigned int mask;
+	unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
 	unsigned long flags;
 
-	irq -= I8259A_IRQ_BASE;
 	mask = 1 << irq;
 	raw_spin_lock_irqsave(&i8259A_lock, flags);
 	cached_irq_mask |= mask;
@@ -75,12 +74,11 @@ static void disable_8259A_irq(unsigned int irq)
 	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
-static void enable_8259A_irq(unsigned int irq)
+static void enable_8259A_irq(struct irq_data *d)
 {
-	unsigned int mask;
+	unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
 	unsigned long flags;
 
-	irq -= I8259A_IRQ_BASE;
 	mask = ~(1 << irq);
 	raw_spin_lock_irqsave(&i8259A_lock, flags);
 	cached_irq_mask &= mask;
@@ -145,12 +143,11 @@ static inline int i8259A_irq_real(unsigned int irq)
  * first, _then_ send the EOI, and the order of EOI
  * to the two 8259s is important!
  */
-static void mask_and_ack_8259A(unsigned int irq)
+static void mask_and_ack_8259A(struct irq_data *d)
 {
-	unsigned int irqmask;
+	unsigned int irqmask, irq = d->irq - I8259A_IRQ_BASE;
 	unsigned long flags;
 
-	irq -= I8259A_IRQ_BASE;
 	irqmask = 1 << irq;
 	raw_spin_lock_irqsave(&i8259A_lock, flags);
 	/*
@@ -290,9 +287,9 @@ static void init_8259A(int auto_eoi)
 		 * In AEOI mode we just have to mask the interrupt
 		 * when acking.
 		 */
-		i8259A_chip.mask_ack = disable_8259A_irq;
+		i8259A_chip.irq_mask_ack = disable_8259A_irq;
 	else
-		i8259A_chip.mask_ack = mask_and_ack_8259A;
+		i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
 
 	udelay(100); /* wait for 8259A to initialize */
 
@@ -87,17 +87,10 @@ unsigned int gic_get_int(void)
 	return i;
 }
 
-static unsigned int gic_irq_startup(unsigned int irq)
+static void gic_irq_ack(struct irq_data *d)
 {
-	irq -= _irqbase;
-	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
-	GIC_SET_INTR_MASK(irq);
-	return 0;
-}
+	unsigned int irq = d->irq - _irqbase;
 
-static void gic_irq_ack(unsigned int irq)
-{
-	irq -= _irqbase;
 	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
 	GIC_CLR_INTR_MASK(irq);
 
@@ -105,16 +98,16 @@ static void gic_irq_ack(unsigned int irq)
 	GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
 }
 
-static void gic_mask_irq(unsigned int irq)
+static void gic_mask_irq(struct irq_data *d)
 {
-	irq -= _irqbase;
+	unsigned int irq = d->irq - _irqbase;
 	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
 	GIC_CLR_INTR_MASK(irq);
 }
 
-static void gic_unmask_irq(unsigned int irq)
+static void gic_unmask_irq(struct irq_data *d)
 {
-	irq -= _irqbase;
+	unsigned int irq = d->irq - _irqbase;
 	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
 	GIC_SET_INTR_MASK(irq);
 }
@@ -123,13 +116,14 @@ static void gic_unmask_irq(unsigned int irq)
 
 static DEFINE_SPINLOCK(gic_lock);
 
-static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
+			    bool force)
 {
+	unsigned int irq = d->irq - _irqbase;
 	cpumask_t tmp = CPU_MASK_NONE;
 	unsigned long flags;
 	int i;
 
-	irq -= _irqbase;
 	pr_debug("%s(%d) called\n", __func__, irq);
 	cpumask_and(&tmp, cpumask, cpu_online_mask);
 	if (cpus_empty(tmp))
@@ -147,23 +141,22 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 		set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
 
 	}
-	cpumask_copy(irq_desc[irq].affinity, cpumask);
+	cpumask_copy(d->affinity, cpumask);
 	spin_unlock_irqrestore(&gic_lock, flags);
 
-	return 0;
+	return IRQ_SET_MASK_OK_NOCOPY;
 }
 #endif
 
 static struct irq_chip gic_irq_controller = {
-	.name = "MIPS GIC",
-	.startup = gic_irq_startup,
-	.ack = gic_irq_ack,
-	.mask = gic_mask_irq,
-	.mask_ack = gic_mask_irq,
-	.unmask = gic_unmask_irq,
-	.eoi = gic_unmask_irq,
+	.name = "MIPS GIC",
+	.irq_ack = gic_irq_ack,
+	.irq_mask = gic_mask_irq,
+	.irq_mask_ack = gic_mask_irq,
+	.irq_unmask = gic_unmask_irq,
+	.irq_eoi = gic_unmask_irq,
 #ifdef CONFIG_SMP
-	.set_affinity = gic_set_affinity,
+	.irq_set_affinity = gic_set_affinity,
 #endif
 };
 
@@ -29,64 +29,64 @@
 
 static DEFINE_RAW_SPINLOCK(gt641xx_irq_lock);
 
-static void ack_gt641xx_irq(unsigned int irq)
+static void ack_gt641xx_irq(struct irq_data *d)
 {
 	unsigned long flags;
 	u32 cause;
 
 	raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
 	cause = GT_READ(GT_INTRCAUSE_OFS);
-	cause &= ~GT641XX_IRQ_TO_BIT(irq);
+	cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
 	GT_WRITE(GT_INTRCAUSE_OFS, cause);
 	raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
 }
 
-static void mask_gt641xx_irq(unsigned int irq)
+static void mask_gt641xx_irq(struct irq_data *d)
 {
 	unsigned long flags;
 	u32 mask;
 
 	raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
 	mask = GT_READ(GT_INTRMASK_OFS);
-	mask &= ~GT641XX_IRQ_TO_BIT(irq);
+	mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
 	GT_WRITE(GT_INTRMASK_OFS, mask);
 	raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
 }
 
-static void mask_ack_gt641xx_irq(unsigned int irq)
+static void mask_ack_gt641xx_irq(struct irq_data *d)
 {
 	unsigned long flags;
 	u32 cause, mask;
 
 	raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
 	mask = GT_READ(GT_INTRMASK_OFS);
-	mask &= ~GT641XX_IRQ_TO_BIT(irq);
+	mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
 	GT_WRITE(GT_INTRMASK_OFS, mask);
 
 	cause = GT_READ(GT_INTRCAUSE_OFS);
-	cause &= ~GT641XX_IRQ_TO_BIT(irq);
+	cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
 	GT_WRITE(GT_INTRCAUSE_OFS, cause);
 	raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
 }
 
-static void unmask_gt641xx_irq(unsigned int irq)
+static void unmask_gt641xx_irq(struct irq_data *d)
 {
 	unsigned long flags;
 	u32 mask;
 
 	raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
 	mask = GT_READ(GT_INTRMASK_OFS);
-	mask |= GT641XX_IRQ_TO_BIT(irq);
+	mask |= GT641XX_IRQ_TO_BIT(d->irq);
 	GT_WRITE(GT_INTRMASK_OFS, mask);
 	raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
 }
 
 static struct irq_chip gt641xx_irq_chip = {
 	.name = "GT641xx",
-	.ack = ack_gt641xx_irq,
-	.mask = mask_gt641xx_irq,
-	.mask_ack = mask_ack_gt641xx_irq,
-	.unmask = unmask_gt641xx_irq,
+	.irq_ack = ack_gt641xx_irq,
+	.irq_mask = mask_gt641xx_irq,
+	.irq_mask_ack = mask_ack_gt641xx_irq,
+	.irq_unmask = unmask_gt641xx_irq,
 };
 
 void gt641xx_irq_dispatch(void)
 
@@ -28,8 +28,10 @@ static unsigned long _icctrl_msc;
 static unsigned int irq_base;
 
 /* mask off an interrupt */
-static inline void mask_msc_irq(unsigned int irq)
+static inline void mask_msc_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	if (irq < (irq_base + 32))
 		MSCIC_WRITE(MSC01_IC_DISL, 1<<(irq - irq_base));
 	else
@@ -37,8 +39,10 @@ static inline void mask_msc_irq(unsigned int irq)
 }
 
 /* unmask an interrupt */
-static inline void unmask_msc_irq(unsigned int irq)
+static inline void unmask_msc_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	if (irq < (irq_base + 32))
 		MSCIC_WRITE(MSC01_IC_ENAL, 1<<(irq - irq_base));
 	else
@@ -48,9 +52,11 @@ static inline void unmask_msc_irq(unsigned int irq)
 /*
  * Masks and ACKs an IRQ
  */
-static void level_mask_and_ack_msc_irq(unsigned int irq)
+static void level_mask_and_ack_msc_irq(struct irq_data *d)
 {
-	mask_msc_irq(irq);
+	unsigned int irq = d->irq;
+
+	mask_msc_irq(d);
 	if (!cpu_has_veic)
 		MSCIC_WRITE(MSC01_IC_EOI, 0);
 	/* This actually needs to be a call into platform code */
@@ -60,9 +66,11 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
 /*
  * Masks and ACKs an IRQ
  */
-static void edge_mask_and_ack_msc_irq(unsigned int irq)
+static void edge_mask_and_ack_msc_irq(struct irq_data *d)
 {
-	mask_msc_irq(irq);
+	unsigned int irq = d->irq;
+
+	mask_msc_irq(d);
 	if (!cpu_has_veic)
 		MSCIC_WRITE(MSC01_IC_EOI, 0);
 	else {
@@ -74,15 +82,6 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
 	smtc_im_ack_irq(irq);
 }
 
-/*
- * End IRQ processing
- */
-static void end_msc_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		unmask_msc_irq(irq);
-}
-
 /*
  * Interrupt handler for interrupts coming from SOC-it.
  */
@@ -107,22 +106,20 @@ static void msc_bind_eic_interrupt(int irq, int set)
 
 static struct irq_chip msc_levelirq_type = {
 	.name = "SOC-it-Level",
-	.ack = level_mask_and_ack_msc_irq,
-	.mask = mask_msc_irq,
-	.mask_ack = level_mask_and_ack_msc_irq,
-	.unmask = unmask_msc_irq,
-	.eoi = unmask_msc_irq,
-	.end = end_msc_irq,
+	.irq_ack = level_mask_and_ack_msc_irq,
+	.irq_mask = mask_msc_irq,
+	.irq_mask_ack = level_mask_and_ack_msc_irq,
+	.irq_unmask = unmask_msc_irq,
+	.irq_eoi = unmask_msc_irq,
 };
 
 static struct irq_chip msc_edgeirq_type = {
 	.name = "SOC-it-Edge",
-	.ack = edge_mask_and_ack_msc_irq,
-	.mask = mask_msc_irq,
-	.mask_ack = edge_mask_and_ack_msc_irq,
-	.unmask = unmask_msc_irq,
-	.eoi = unmask_msc_irq,
-	.end = end_msc_irq,
+	.irq_ack = edge_mask_and_ack_msc_irq,
+	.irq_mask = mask_msc_irq,
+	.irq_mask_ack = edge_mask_and_ack_msc_irq,
+	.irq_unmask = unmask_msc_irq,
+	.irq_eoi = unmask_msc_irq,
 };
 
@@ -18,23 +18,23 @@
 #include <asm/mipsregs.h>
 #include <asm/system.h>
 
-static inline void unmask_rm7k_irq(unsigned int irq)
+static inline void unmask_rm7k_irq(struct irq_data *d)
 {
-	set_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE));
+	set_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE));
 }
 
-static inline void mask_rm7k_irq(unsigned int irq)
+static inline void mask_rm7k_irq(struct irq_data *d)
 {
-	clear_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE));
+	clear_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE));
 }
 
 static struct irq_chip rm7k_irq_controller = {
 	.name = "RM7000",
-	.ack = mask_rm7k_irq,
-	.mask = mask_rm7k_irq,
-	.mask_ack = mask_rm7k_irq,
-	.unmask = unmask_rm7k_irq,
-	.eoi = unmask_rm7k_irq
+	.irq_ack = mask_rm7k_irq,
+	.irq_mask = mask_rm7k_irq,
+	.irq_mask_ack = mask_rm7k_irq,
+	.irq_unmask = unmask_rm7k_irq,
+	.irq_eoi = unmask_rm7k_irq
 };
 
 void __init rm7k_cpu_irq_init(void)
 
@@ -19,22 +19,22 @@
 #include <asm/mipsregs.h>
 #include <asm/system.h>
 
-static inline void unmask_rm9k_irq(unsigned int irq)
+static inline void unmask_rm9k_irq(struct irq_data *d)
 {
-	set_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE));
+	set_c0_intcontrol(0x1000 << (d->irq - RM9K_CPU_IRQ_BASE));
 }
 
-static inline void mask_rm9k_irq(unsigned int irq)
+static inline void mask_rm9k_irq(struct irq_data *d)
 {
-	clear_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE));
+	clear_c0_intcontrol(0x1000 << (d->irq - RM9K_CPU_IRQ_BASE));
 }
 
-static inline void rm9k_cpu_irq_enable(unsigned int irq)
+static inline void rm9k_cpu_irq_enable(struct irq_data *d)
 {
 	unsigned long flags;
 
 	local_irq_save(flags);
-	unmask_rm9k_irq(irq);
+	unmask_rm9k_irq(d);
 	local_irq_restore(flags);
 }
 
@@ -43,50 +43,47 @@ static inline void rm9k_cpu_irq_enable(unsigned int irq)
  */
 static void local_rm9k_perfcounter_irq_startup(void *args)
 {
-	unsigned int irq = (unsigned int) args;
-
-	rm9k_cpu_irq_enable(irq);
+	rm9k_cpu_irq_enable(args);
 }
 
-static unsigned int rm9k_perfcounter_irq_startup(unsigned int irq)
+static unsigned int rm9k_perfcounter_irq_startup(struct irq_data *d)
 {
-	on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 1);
+	on_each_cpu(local_rm9k_perfcounter_irq_startup, d, 1);
 
 	return 0;
 }
 
 static void local_rm9k_perfcounter_irq_shutdown(void *args)
 {
-	unsigned int irq = (unsigned int) args;
 	unsigned long flags;
 
 	local_irq_save(flags);
-	mask_rm9k_irq(irq);
+	mask_rm9k_irq(args);
 	local_irq_restore(flags);
 }
 
-static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
+static void rm9k_perfcounter_irq_shutdown(struct irq_data *d)
 {
-	on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 1);
+	on_each_cpu(local_rm9k_perfcounter_irq_shutdown, d, 1);
 }
 
 static struct irq_chip rm9k_irq_controller = {
 	.name = "RM9000",
-	.ack = mask_rm9k_irq,
-	.mask = mask_rm9k_irq,
-	.mask_ack = mask_rm9k_irq,
-	.unmask = unmask_rm9k_irq,
-	.eoi = unmask_rm9k_irq
+	.irq_ack = mask_rm9k_irq,
+	.irq_mask = mask_rm9k_irq,
+	.irq_mask_ack = mask_rm9k_irq,
+	.irq_unmask = unmask_rm9k_irq,
+	.irq_eoi = unmask_rm9k_irq
 };
 
 static struct irq_chip rm9k_perfcounter_irq = {
 	.name = "RM9000",
-	.startup = rm9k_perfcounter_irq_startup,
-	.shutdown = rm9k_perfcounter_irq_shutdown,
-	.ack = mask_rm9k_irq,
-	.mask = mask_rm9k_irq,
-	.mask_ack = mask_rm9k_irq,
-	.unmask = unmask_rm9k_irq,
+	.irq_startup = rm9k_perfcounter_irq_startup,
+	.irq_shutdown = rm9k_perfcounter_irq_shutdown,
+	.irq_ack = mask_rm9k_irq,
+	.irq_mask = mask_rm9k_irq,
+	.irq_mask_ack = mask_rm9k_irq,
+	.irq_unmask = unmask_rm9k_irq,
 };
 
 unsigned int rm9000_perfcount_irq;
 
@@ -81,48 +81,9 @@ void ack_bad_irq(unsigned int irq)
 
 atomic_t irq_err_count;
 
-/*
- * Generic, controller-independent functions:
- */
-
-int show_interrupts(struct seq_file *p, void *v)
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
-	int i = *(loff_t *) v, j;
-	struct irqaction * action;
-	unsigned long flags;
-
-	if (i == 0) {
-		seq_printf(p, " ");
-		for_each_online_cpu(j)
-			seq_printf(p, "CPU%d ", j);
-		seq_putc(p, '\n');
-	}
-
-	if (i < NR_IRQS) {
-		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
-		action = irq_desc[i].action;
-		if (!action)
-			goto skip;
-		seq_printf(p, "%3d: ", i);
-#ifndef CONFIG_SMP
-		seq_printf(p, "%10u ", kstat_irqs(i));
-#else
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-#endif
-		seq_printf(p, " %14s", irq_desc[i].chip->name);
-		seq_printf(p, " %s", action->name);
-
-		for (action=action->next; action; action = action->next)
-			seq_printf(p, ", %s", action->name);
-
-		seq_putc(p, '\n');
-skip:
-		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-	} else if (i == NR_IRQS) {
-		seq_putc(p, '\n');
-		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-	}
+	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
 	return 0;
 }
 
@@ -183,8 +144,8 @@ void __irq_entry do_IRQ(unsigned int irq)
 {
 	irq_enter();
 	check_stack_overflow();
-	__DO_IRQ_SMTC_HOOK(irq);
-	generic_handle_irq(irq);
+	if (!smtc_handle_on_other_cpu(irq))
+		generic_handle_irq(irq);
 	irq_exit();
 }
 
@@ -197,7 +158,7 @@ void __irq_entry do_IRQ(unsigned int irq)
 void __irq_entry do_IRQ_no_affinity(unsigned int irq)
 {
 	irq_enter();
-	__NO_AFFINITY_IRQ_SMTC_HOOK(irq);
+	smtc_im_backstop(irq);
 	generic_handle_irq(irq);
 	irq_exit();
 }
 
@@ -37,42 +37,38 @@
 #include <asm/mipsmtregs.h>
 #include <asm/system.h>
 
-static inline void unmask_mips_irq(unsigned int irq)
+static inline void unmask_mips_irq(struct irq_data *d)
 {
-	set_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE));
+	set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
 	irq_enable_hazard();
 }
 
-static inline void mask_mips_irq(unsigned int irq)
+static inline void mask_mips_irq(struct irq_data *d)
 {
-	clear_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE));
+	clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
 	irq_disable_hazard();
 }
 
 static struct irq_chip mips_cpu_irq_controller = {
 	.name = "MIPS",
-	.ack = mask_mips_irq,
-	.mask = mask_mips_irq,
-	.mask_ack = mask_mips_irq,
-	.unmask = unmask_mips_irq,
-	.eoi = unmask_mips_irq,
+	.irq_ack = mask_mips_irq,
+	.irq_mask = mask_mips_irq,
+	.irq_mask_ack = mask_mips_irq,
+	.irq_unmask = unmask_mips_irq,
+	.irq_eoi = unmask_mips_irq,
 };
 
 /*
  * Basically the same as above but taking care of all the MT stuff
  */
 
-#define unmask_mips_mt_irq unmask_mips_irq
-#define mask_mips_mt_irq mask_mips_irq
-
-static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
+static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
 {
 	unsigned int vpflags = dvpe();
 
-	clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
+	clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
 	evpe(vpflags);
-	unmask_mips_mt_irq(irq);
-
+	unmask_mips_irq(d);
 	return 0;
 }
 
@@ -80,22 +76,22 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
  * While we ack the interrupt interrupts are disabled and thus we don't need
  * to deal with concurrency issues. Same for mips_cpu_irq_end.
  */
-static void mips_mt_cpu_irq_ack(unsigned int irq)
+static void mips_mt_cpu_irq_ack(struct irq_data *d)
 {
 	unsigned int vpflags = dvpe();
-	clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
+	clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
 	evpe(vpflags);
-	mask_mips_mt_irq(irq);
+	mask_mips_irq(d);
 }
 
 static struct irq_chip mips_mt_cpu_irq_controller = {
 	.name = "MIPS",
-	.startup = mips_mt_cpu_irq_startup,
-	.ack = mips_mt_cpu_irq_ack,
-	.mask = mask_mips_mt_irq,
-	.mask_ack = mips_mt_cpu_irq_ack,
-	.unmask = unmask_mips_mt_irq,
-	.eoi = unmask_mips_mt_irq,
+	.irq_startup = mips_mt_cpu_irq_startup,
+	.irq_ack = mips_mt_cpu_irq_ack,
+	.irq_mask = mask_mips_irq,
+	.irq_mask_ack = mips_mt_cpu_irq_ack,
+	.irq_unmask = unmask_mips_irq,
+	.irq_eoi = unmask_mips_irq,
 };
 
 void __init mips_cpu_irq_init(void)
 
@@ -63,9 +63,9 @@ static struct {
 	unsigned char mode;
 } txx9irq[TXx9_MAX_IR] __read_mostly;
 
-static void txx9_irq_unmask(unsigned int irq)
+static void txx9_irq_unmask(struct irq_data *d)
 {
-	unsigned int irq_nr = irq - TXX9_IRQ_BASE;
+	unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
 	u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16 ) / 2];
 	int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
 
@@ -79,9 +79,9 @@ static void txx9_irq_unmask(unsigned int irq)
 #endif
 }
 
-static inline void txx9_irq_mask(unsigned int irq)
+static inline void txx9_irq_mask(struct irq_data *d)
 {
-	unsigned int irq_nr = irq - TXX9_IRQ_BASE;
+	unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
 	u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2];
 	int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
 
@@ -99,19 +99,19 @@ static inline void txx9_irq_mask(unsigned int irq)
 #endif
 }
 
-static void txx9_irq_mask_ack(unsigned int irq)
+static void txx9_irq_mask_ack(struct irq_data *d)
 {
-	unsigned int irq_nr = irq - TXX9_IRQ_BASE;
+	unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
 
-	txx9_irq_mask(irq);
+	txx9_irq_mask(d);
 	/* clear edge detection */
 	if (unlikely(TXx9_IRCR_EDGE(txx9irq[irq_nr].mode)))
 		__raw_writel(TXx9_IRSCR_EIClrE | irq_nr, &txx9_ircptr->scr);
 }
 
-static int txx9_irq_set_type(unsigned int irq, unsigned int flow_type)
+static int txx9_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
-	unsigned int irq_nr = irq - TXX9_IRQ_BASE;
+	unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
 	u32 cr;
 	u32 __iomem *crp;
 	int ofs;
@@ -139,11 +139,11 @@ static int txx9_irq_set_type(unsigned int irq, unsigned int flow_type)
 
 static struct irq_chip txx9_irq_chip = {
 	.name = "TXX9",
-	.ack = txx9_irq_mask_ack,
-	.mask = txx9_irq_mask,
-	.mask_ack = txx9_irq_mask_ack,
-	.unmask = txx9_irq_unmask,
-	.set_type = txx9_irq_set_type,
+	.irq_ack = txx9_irq_mask_ack,
+	.irq_mask = txx9_irq_mask,
+	.irq_mask_ack = txx9_irq_mask_ack,
+	.irq_unmask = txx9_irq_unmask,
+	.irq_set_type = txx9_irq_set_type,
 };
 
 void __init txx9_irq_init(unsigned long baseaddr)
 
@@ -586,6 +586,10 @@ einval: li v0, -ENOSYS
 	sys sys_fanotify_init 2
 	sys sys_fanotify_mark 6
 	sys sys_prlimit64 4
+	sys sys_name_to_handle_at 5
+	sys sys_open_by_handle_at 3 /* 4340 */
+	sys sys_clock_adjtime 2
+	sys sys_syncfs 1
 	.endm
 
 /* We pre-compute the number of _instruction_ bytes needed to
 
@@ -425,4 +425,8 @@ sys_call_table:
 	PTR sys_fanotify_init /* 5295 */
 	PTR sys_fanotify_mark
 	PTR sys_prlimit64
+	PTR sys_name_to_handle_at
+	PTR sys_open_by_handle_at
+	PTR sys_clock_adjtime /* 5300 */
+	PTR sys_syncfs
 	.size sys_call_table,.-sys_call_table
 
@@ -425,4 +425,8 @@ EXPORT(sysn32_call_table)
 	PTR sys_fanotify_init /* 6300 */
 	PTR sys_fanotify_mark
 	PTR sys_prlimit64
+	PTR sys_name_to_handle_at
+	PTR sys_open_by_handle_at
+	PTR compat_sys_clock_adjtime /* 6305 */
+	PTR sys_syncfs
 	.size sysn32_call_table,.-sysn32_call_table
 
@@ -543,4 +543,8 @@ sys_call_table:
 	PTR sys_fanotify_init
 	PTR sys_32_fanotify_mark
 	PTR sys_prlimit64
+	PTR sys_name_to_handle_at
+	PTR compat_sys_open_by_handle_at /* 4340 */
+	PTR compat_sys_clock_adjtime
+	PTR sys_syncfs
 	.size sys_call_table,.-sys_call_table
 
@@ -677,8 +677,9 @@ void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 	 */
 }
 
-void smtc_forward_irq(unsigned int irq)
+void smtc_forward_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	int target;
 
 	/*
@@ -692,7 +693,7 @@ void smtc_forward_irq(unsigned int irq)
 	 * and efficiency, we just pick the easiest one to find.
 	 */
 
-	target = cpumask_first(irq_desc[irq].affinity);
+	target = cpumask_first(d->affinity);
 
 	/*
 	 * We depend on the platform code to have correctly processed
@@ -707,12 +708,10 @@ void smtc_forward_irq(unsigned int irq)
 	 */
 
 	/* If no one is eligible, service locally */
-	if (target >= NR_CPUS) {
+	if (target >= NR_CPUS)
 		do_IRQ_no_affinity(irq);
-		return;
-	}
-
-	smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
+	else
+		smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
 }
 
 #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
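
Every hunk above applies the same mechanical conversion: irq_chip callbacks stop taking a bare IRQ number and instead take a struct irq_data pointer, and the struct irq_chip members gain an irq_ prefix (.mask becomes .irq_mask, .set_affinity becomes .irq_set_affinity, and so on). The sketch below is illustrative only, for a made-up "foo" controller rather than any driver touched by this merge; it assumes the generic struct irq_data / struct irq_chip API that the MIPS code is converted to here, and FOO_IRQ_BASE / foo_mask_bits are invented stand-ins.

/*
 * Hypothetical before/after sketch of the irq_chip conversion pattern.
 * "foo", FOO_IRQ_BASE and foo_mask_bits are invented for illustration;
 * only struct irq_data and struct irq_chip are real kernel types.
 */
#include <linux/irq.h>

#define FOO_IRQ_BASE	8		/* hypothetical base IRQ number */
static u32 foo_mask_bits;		/* stand-in for a controller mask register */

/* Old style: the callback received a bare IRQ number. */
static void foo_mask_irq_old(unsigned int irq)
{
	foo_mask_bits |= 1 << (irq - FOO_IRQ_BASE);
}

/* New style: the callback receives irq_data and derives the number from it. */
static void foo_mask_irq(struct irq_data *d)
{
	unsigned int irq = d->irq - FOO_IRQ_BASE;

	foo_mask_bits |= 1 << irq;
}

static struct irq_chip foo_irq_chip = {
	.name = "FOO",
	.irq_mask = foo_mask_irq,	/* was: .mask = foo_mask_irq_old */
};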