[POWERPC] Use the genirq framework

This adapts the generic powerpc interrupt handling code, and all of
the platforms except for the embedded 6xx machines, to use the new
genirq framework.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Author:     Benjamin Herrenschmidt
AuthorDate: 2006-07-03 19:32:51 +10:00
Committer:  Paul Mackerras
Parent:     5a43a066b1
Commit:     b9e5b4e6a9

17 changed files with 692 additions and 635 deletions
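The core of the conversion shown in the xics.c diff below is the move from a single `hw_interrupt_type` (with enable/disable/ack/end hooks dispatched through an `xics_ops` table) to per-backend `struct irq_chip` instances, registered together with a genirq flow handler. A minimal sketch of that registration pattern, using the 2.6.18-era genirq API; the `example_*` names are placeholders for illustration only, not part of this commit, which uses the XICS-specific accessors visible in the diff:

```c
#include <linux/irq.h>		/* struct irq_chip, set_irq_chip_and_handler() */
#include <linux/interrupt.h>

/*
 * Hypothetical PIC accessors standing in for the real XICS RTAS calls
 * and XIRR register/hypercall accesses.
 */
static void example_mask_irq(unsigned int irq)   { /* mask the source at the PIC */ }
static void example_unmask_irq(unsigned int irq) { /* unmask the source at the PIC */ }
static void example_eoi_irq(unsigned int irq)    { /* signal end-of-interrupt */ }

/*
 * Under genirq a controller describes itself with an irq_chip instead of
 * the old hw_interrupt_type; the flow (level, edge, fasteoi, percpu) is
 * selected per interrupt by the handler function it is paired with.
 */
static struct irq_chip example_pic = {
	.typename	= " EXAMPLE-PIC ",
	.mask		= example_mask_irq,
	.unmask		= example_unmask_irq,
	.eoi		= example_eoi_irq,
};

static void __init example_pic_init(unsigned int first, unsigned int last)
{
	unsigned int i;

	for (i = first; i < last; i++) {
		/* Level-triggered sources handled with the fasteoi flow,
		 * mirroring what the XICS conversion below does. */
		irq_desc[i].status |= IRQ_LEVEL;
		set_irq_chip_and_handler(i, &example_pic, handle_fasteoi_irq);
	}
}
```

The patch applies this pattern to XICS, splitting the low-level accessors into direct (MMIO) and LPAR (hypercall) variants so that each firmware flavour gets its own `irq_chip` with a matching `eoi` and `get_irq` implementation.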

arch/powerpc/platforms/pseries/xics.c

@@ -31,23 +31,6 @@
#include "xics.h"
static unsigned int xics_startup(unsigned int irq);
static void xics_enable_irq(unsigned int irq);
static void xics_disable_irq(unsigned int irq);
static void xics_mask_and_ack_irq(unsigned int irq);
static void xics_end_irq(unsigned int irq);
static void xics_set_affinity(unsigned int irq_nr, cpumask_t cpumask);
static struct hw_interrupt_type xics_pic = {
.typename = " XICS ",
.startup = xics_startup,
.enable = xics_enable_irq,
.disable = xics_disable_irq,
.ack = xics_mask_and_ack_irq,
.end = xics_end_irq,
.set_affinity = xics_set_affinity
};
/* This is used to map real irq numbers to virtual */
static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC);
@@ -98,48 +81,34 @@ static int ibm_set_xive;
static int ibm_int_on;
static int ibm_int_off;
typedef struct {
int (*xirr_info_get)(int cpu);
void (*xirr_info_set)(int cpu, int val);
void (*cppr_info)(int cpu, u8 val);
void (*qirr_info)(int cpu, u8 val);
} xics_ops;
/* Direct HW low level accessors */
/* SMP */
static int pSeries_xirr_info_get(int n_cpu)
static inline int direct_xirr_info_get(int n_cpu)
{
return in_be32(&xics_per_cpu[n_cpu]->xirr.word);
}
static void pSeries_xirr_info_set(int n_cpu, int value)
static inline void direct_xirr_info_set(int n_cpu, int value)
{
out_be32(&xics_per_cpu[n_cpu]->xirr.word, value);
}
static void pSeries_cppr_info(int n_cpu, u8 value)
static inline void direct_cppr_info(int n_cpu, u8 value)
{
out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value);
}
static void pSeries_qirr_info(int n_cpu, u8 value)
static inline void direct_qirr_info(int n_cpu, u8 value)
{
out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
}
static xics_ops pSeries_ops = {
pSeries_xirr_info_get,
pSeries_xirr_info_set,
pSeries_cppr_info,
pSeries_qirr_info
};
static xics_ops *ops = &pSeries_ops;
/* LPAR low level accessors */
/* LPAR */
static inline long plpar_eoi(unsigned long xirr)
{
return plpar_hcall_norets(H_EOI, xirr);
@@ -161,7 +130,7 @@ static inline long plpar_xirr(unsigned long *xirr_ret)
return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy);
}
static int pSeriesLP_xirr_info_get(int n_cpu)
static inline int lpar_xirr_info_get(int n_cpu)
{
unsigned long lpar_rc;
unsigned long return_value;
@@ -172,7 +141,7 @@ static int pSeriesLP_xirr_info_get(int n_cpu)
return (int)return_value;
}
static void pSeriesLP_xirr_info_set(int n_cpu, int value)
static inline void lpar_xirr_info_set(int n_cpu, int value)
{
unsigned long lpar_rc;
unsigned long val64 = value & 0xffffffff;
@@ -183,7 +152,7 @@ static void pSeriesLP_xirr_info_set(int n_cpu, int value)
val64);
}
void pSeriesLP_cppr_info(int n_cpu, u8 value)
static inline void lpar_cppr_info(int n_cpu, u8 value)
{
unsigned long lpar_rc;
@@ -192,7 +161,7 @@ void pSeriesLP_cppr_info(int n_cpu, u8 value)
panic("bad return code cppr - rc = %lx\n", lpar_rc);
}
static void pSeriesLP_qirr_info(int n_cpu , u8 value)
static inline void lpar_qirr_info(int n_cpu , u8 value)
{
unsigned long lpar_rc;
@@ -201,36 +170,9 @@ static void pSeriesLP_qirr_info(int n_cpu , u8 value)
panic("bad return code qirr - rc = %lx\n", lpar_rc);
}
xics_ops pSeriesLP_ops = {
pSeriesLP_xirr_info_get,
pSeriesLP_xirr_info_set,
pSeriesLP_cppr_info,
pSeriesLP_qirr_info
};
static unsigned int xics_startup(unsigned int virq)
{
unsigned int irq;
/* High level handlers and init code */
irq = irq_offset_down(virq);
if (radix_tree_insert(&irq_map, virt_irq_to_real(irq),
&virt_irq_to_real_map[irq]) == -ENOMEM)
printk(KERN_CRIT "Out of memory creating real -> virtual"
" IRQ mapping for irq %u (real 0x%x)\n",
virq, virt_irq_to_real(irq));
xics_enable_irq(virq);
return 0; /* return value is ignored */
}
static unsigned int real_irq_to_virt(unsigned int real_irq)
{
unsigned int *ptr;
ptr = radix_tree_lookup(&irq_map, real_irq);
if (ptr == NULL)
return NO_IRQ;
return ptr - virt_irq_to_real_map;
}
#ifdef CONFIG_SMP
static int get_irq_server(unsigned int irq)
@@ -264,17 +206,20 @@ static int get_irq_server(unsigned int irq)
}
#endif
static void xics_enable_irq(unsigned int virq)
static void xics_unmask_irq(unsigned int virq)
{
unsigned int irq;
int call_status;
unsigned int server;
irq = virt_irq_to_real(irq_offset_down(virq));
if (irq == XICS_IPI)
WARN_ON(irq == NO_IRQ);
if (irq == XICS_IPI || irq == NO_IRQ)
return;
server = get_irq_server(virq);
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
DEFAULT_PRIORITY);
if (call_status != 0) {
@@ -293,7 +238,7 @@ static void xics_enable_irq(unsigned int virq)
}
}
static void xics_disable_real_irq(unsigned int irq)
static void xics_mask_real_irq(unsigned int irq)
{
int call_status;
unsigned int server;
@@ -318,75 +263,104 @@ static void xics_disable_real_irq(unsigned int irq)
}
}
static void xics_disable_irq(unsigned int virq)
static void xics_mask_irq(unsigned int virq)
{
unsigned int irq;
irq = virt_irq_to_real(irq_offset_down(virq));
xics_disable_real_irq(irq);
WARN_ON(irq == NO_IRQ);
if (irq != NO_IRQ)
xics_mask_real_irq(irq);
}
static void xics_end_irq(unsigned int irq)
static void xics_set_irq_revmap(unsigned int virq)
{
unsigned int irq;
irq = irq_offset_down(virq);
if (radix_tree_insert(&irq_map, virt_irq_to_real(irq),
&virt_irq_to_real_map[irq]) == -ENOMEM)
printk(KERN_CRIT "Out of memory creating real -> virtual"
" IRQ mapping for irq %u (real 0x%x)\n",
virq, virt_irq_to_real(irq));
}
static unsigned int xics_startup(unsigned int virq)
{
xics_set_irq_revmap(virq);
xics_unmask_irq(virq);
return 0;
}
static unsigned int real_irq_to_virt(unsigned int real_irq)
{
unsigned int *ptr;
ptr = radix_tree_lookup(&irq_map, real_irq);
if (ptr == NULL)
return NO_IRQ;
return ptr - virt_irq_to_real_map;
}
static void xics_eoi_direct(unsigned int irq)
{
int cpu = smp_processor_id();
iosync();
ops->xirr_info_set(cpu, ((0xff << 24) |
direct_xirr_info_set(cpu, ((0xff << 24) |
(virt_irq_to_real(irq_offset_down(irq)))));
}
static void xics_eoi_lpar(unsigned int irq)
{
int cpu = smp_processor_id();
iosync();
lpar_xirr_info_set(cpu, ((0xff << 24) |
(virt_irq_to_real(irq_offset_down(irq)))));
}
static void xics_mask_and_ack_irq(unsigned int irq)
static inline int xics_remap_irq(int vec)
{
int cpu = smp_processor_id();
if (irq < irq_offset_value()) {
i8259_pic.ack(irq);
iosync();
ops->xirr_info_set(cpu, ((0xff<<24) |
xics_irq_8259_cascade_real));
iosync();
}
}
int xics_get_irq(struct pt_regs *regs)
{
unsigned int cpu = smp_processor_id();
unsigned int vec;
int irq;
vec = ops->xirr_info_get(cpu);
/* (vec >> 24) == old priority */
vec &= 0x00ffffff;
/* for sanity, this had better be < NR_IRQS - 16 */
if (vec == xics_irq_8259_cascade_real) {
irq = i8259_irq(regs);
xics_end_irq(irq_offset_up(xics_irq_8259_cascade));
} else if (vec == XICS_IRQ_SPURIOUS) {
irq = -1;
} else {
irq = real_irq_to_virt(vec);
if (irq == NO_IRQ)
irq = real_irq_to_virt_slowpath(vec);
if (irq == NO_IRQ) {
printk(KERN_ERR "Interrupt %u (real) is invalid,"
" disabling it.\n", vec);
xics_disable_real_irq(vec);
} else
irq = irq_offset_up(irq);
}
return irq;
if (vec == XICS_IRQ_SPURIOUS)
return NO_IRQ;
irq = real_irq_to_virt(vec);
if (irq == NO_IRQ)
irq = real_irq_to_virt_slowpath(vec);
if (likely(irq != NO_IRQ))
return irq_offset_up(irq);
printk(KERN_ERR "Interrupt %u (real) is invalid,"
" disabling it.\n", vec);
xics_mask_real_irq(vec);
return NO_IRQ;
}
static int xics_get_irq_direct(struct pt_regs *regs)
{
unsigned int cpu = smp_processor_id();
return xics_remap_irq(direct_xirr_info_get(cpu));
}
static int xics_get_irq_lpar(struct pt_regs *regs)
{
unsigned int cpu = smp_processor_id();
return xics_remap_irq(lpar_xirr_info_get(cpu));
}
#ifdef CONFIG_SMP
static irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
static irqreturn_t xics_ipi_dispatch(int cpu, struct pt_regs *regs)
{
int cpu = smp_processor_id();
ops->qirr_info(cpu, 0xff);
WARN_ON(cpu_is_offline(cpu));
while (xics_ipi_message[cpu].value) {
@@ -418,18 +392,108 @@ static irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
return IRQ_HANDLED;
}
static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id, struct pt_regs *regs)
{
int cpu = smp_processor_id();
direct_qirr_info(cpu, 0xff);
return xics_ipi_dispatch(cpu, regs);
}
static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id, struct pt_regs *regs)
{
int cpu = smp_processor_id();
lpar_qirr_info(cpu, 0xff);
return xics_ipi_dispatch(cpu, regs);
}
void xics_cause_IPI(int cpu)
{
ops->qirr_info(cpu, IPI_PRIORITY);
if (firmware_has_feature(FW_FEATURE_LPAR))
lpar_qirr_info(cpu, IPI_PRIORITY);
else
direct_qirr_info(cpu, IPI_PRIORITY);
}
#endif /* CONFIG_SMP */
static void xics_set_cpu_priority(int cpu, unsigned char cppr)
{
if (firmware_has_feature(FW_FEATURE_LPAR))
lpar_cppr_info(cpu, cppr);
else
direct_cppr_info(cpu, cppr);
iosync();
}
static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
{
unsigned int irq;
int status;
int xics_status[2];
unsigned long newmask;
cpumask_t tmp = CPU_MASK_NONE;
irq = virt_irq_to_real(irq_offset_down(virq));
if (irq == XICS_IPI || irq == NO_IRQ)
return;
status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
if (status) {
printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive "
"returns %d\n", irq, status);
return;
}
/* For the moment only implement delivery to all cpus or one cpu */
if (cpus_equal(cpumask, CPU_MASK_ALL)) {
newmask = default_distrib_server;
} else {
cpus_and(tmp, cpu_online_map, cpumask);
if (cpus_empty(tmp))
return;
newmask = get_hard_smp_processor_id(first_cpu(tmp));
}
status = rtas_call(ibm_set_xive, 3, 1, NULL,
irq, newmask, xics_status[1]);
if (status) {
printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
"returns %d\n", irq, status);
return;
}
}
static struct irq_chip xics_pic_direct = {
.typename = " XICS ",
.startup = xics_startup,
.mask = xics_mask_irq,
.unmask = xics_unmask_irq,
.eoi = xics_eoi_direct,
.set_affinity = xics_set_affinity
};
static struct irq_chip xics_pic_lpar = {
.typename = " XICS ",
.startup = xics_startup,
.mask = xics_mask_irq,
.unmask = xics_unmask_irq,
.eoi = xics_eoi_lpar,
.set_affinity = xics_set_affinity
};
void xics_setup_cpu(void)
{
int cpu = smp_processor_id();
ops->cppr_info(cpu, 0xff);
iosync();
xics_set_cpu_priority(cpu, 0xff);
/*
* Put the calling processor into the GIQ. This is really only
@@ -453,6 +517,7 @@ void xics_init_IRQ(void)
unsigned long addr;
unsigned long size;
} intnodes[NR_CPUS];
struct irq_chip *chip;
ppc64_boot_msg(0x20, "XICS Init");
@@ -519,26 +584,10 @@ nextnode:
intr_base = intnodes[0].addr;
intr_size = intnodes[0].size;
np = of_find_node_by_type(NULL, "interrupt-controller");
if (!np) {
printk(KERN_DEBUG "xics: no ISA interrupt controller\n");
xics_irq_8259_cascade_real = -1;
xics_irq_8259_cascade = -1;
} else {
ireg = (uint *) get_property(np, "interrupts", NULL);
if (!ireg)
panic("xics_init_IRQ: can't find ISA interrupts property");
xics_irq_8259_cascade_real = *ireg;
xics_irq_8259_cascade
= virt_irq_create_mapping(xics_irq_8259_cascade_real);
i8259_init(0, 0);
of_node_put(np);
}
if (firmware_has_feature(FW_FEATURE_LPAR))
ops = &pSeriesLP_ops;
else {
if (firmware_has_feature(FW_FEATURE_LPAR)) {
ppc_md.get_irq = xics_get_irq_lpar;
chip = &xics_pic_lpar;
} else {
#ifdef CONFIG_SMP
for_each_possible_cpu(i) {
int hard_id;
@@ -554,32 +603,54 @@ nextnode:
#else
xics_per_cpu[0] = ioremap(intr_base, intr_size);
#endif /* CONFIG_SMP */
ppc_md.get_irq = xics_get_irq_direct;
chip = &xics_pic_direct;
}
for (i = irq_offset_value(); i < NR_IRQS; ++i)
get_irq_desc(i)->chip = &xics_pic;
for (i = irq_offset_value(); i < NR_IRQS; ++i) {
/* All IRQs on XICS are level for now. MSI code may want to modify
* that for reporting purposes
*/
get_irq_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, chip, handle_fasteoi_irq);
}
xics_setup_cpu();
ppc64_boot_msg(0x21, "XICS Done");
}
/*
* We cant do this in init_IRQ because we need the memory subsystem up for
* request_irq()
*/
static int __init xics_setup_i8259(void)
static int xics_setup_8259_cascade(void)
{
if (ppc64_interrupt_controller == IC_PPC_XIC &&
xics_irq_8259_cascade != -1) {
if (request_irq(irq_offset_up(xics_irq_8259_cascade),
no_action, 0, "8259 cascade", NULL))
printk(KERN_ERR "xics_setup_i8259: couldn't get 8259 "
"cascade\n");
struct device_node *np;
uint *ireg;
np = of_find_node_by_type(NULL, "interrupt-controller");
if (np == NULL) {
printk(KERN_WARNING "xics: no ISA interrupt controller\n");
xics_irq_8259_cascade_real = -1;
xics_irq_8259_cascade = -1;
return 0;
}
ireg = (uint *) get_property(np, "interrupts", NULL);
if (!ireg)
panic("xics_init_IRQ: can't find ISA interrupts property");
xics_irq_8259_cascade_real = *ireg;
xics_irq_8259_cascade = irq_offset_up
(virt_irq_create_mapping(xics_irq_8259_cascade_real));
i8259_init(0, 0);
of_node_put(np);
xics_set_irq_revmap(xics_irq_8259_cascade);
set_irq_chained_handler(xics_irq_8259_cascade, pSeries_8259_cascade);
return 0;
}
arch_initcall(xics_setup_i8259);
arch_initcall(xics_setup_8259_cascade);
#ifdef CONFIG_SMP
void xics_request_IPIs(void)
@@ -590,61 +661,22 @@ void xics_request_IPIs(void)
* IPIs are marked IRQF_DISABLED as they must run with irqs
* disabled
*/
request_irq(irq_offset_up(XICS_IPI), xics_ipi_action,
IRQF_DISABLED, "IPI", NULL);
get_irq_desc(irq_offset_up(XICS_IPI))->status |= IRQ_PER_CPU;
}
#endif
static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
{
unsigned int irq;
int status;
int xics_status[2];
unsigned long newmask;
cpumask_t tmp = CPU_MASK_NONE;
irq = virt_irq_to_real(irq_offset_down(virq));
if (irq == XICS_IPI || irq == NO_IRQ)
return;
status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
if (status) {
printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive "
"returns %d\n", irq, status);
return;
}
/* For the moment only implement delivery to all cpus or one cpu */
if (cpus_equal(cpumask, CPU_MASK_ALL)) {
newmask = default_distrib_server;
} else {
cpus_and(tmp, cpu_online_map, cpumask);
if (cpus_empty(tmp))
return;
newmask = get_hard_smp_processor_id(first_cpu(tmp));
}
status = rtas_call(ibm_set_xive, 3, 1, NULL,
irq, newmask, xics_status[1]);
if (status) {
printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
"returns %d\n", irq, status);
return;
}
set_irq_handler(irq_offset_up(XICS_IPI), handle_percpu_irq);
if (firmware_has_feature(FW_FEATURE_LPAR))
request_irq(irq_offset_up(XICS_IPI), xics_ipi_action_lpar,
SA_INTERRUPT, "IPI", NULL);
else
request_irq(irq_offset_up(XICS_IPI), xics_ipi_action_direct,
SA_INTERRUPT, "IPI", NULL);
}
#endif /* CONFIG_SMP */
void xics_teardown_cpu(int secondary)
{
struct irq_desc *desc = get_irq_desc(irq_offset_up(XICS_IPI));
int cpu = smp_processor_id();
ops->cppr_info(cpu, 0x00);
iosync();
/* Clear IPI */
ops->qirr_info(cpu, 0xff);
xics_set_cpu_priority(cpu, 0);
/*
* we need to EOI the IPI if we got here from kexec down IPI
@@ -653,7 +685,8 @@ void xics_teardown_cpu(int secondary)
* should we be flagging idle loop instead?
* or creating some task to be scheduled?
*/
ops->xirr_info_set(cpu, XICS_IPI);
if (desc->chip && desc->chip->eoi)
desc->chip->eoi(XICS_IPI);
/*
* Some machines need to have at least one cpu in the GIQ,
@@ -674,8 +707,7 @@ void xics_migrate_irqs_away(void)
unsigned int irq, virq, cpu = smp_processor_id();
/* Reject any interrupt that was queued to us... */
ops->cppr_info(cpu, 0);
iosync();
xics_set_cpu_priority(cpu, 0);
/* remove ourselves from the global interrupt queue */
status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
@@ -683,11 +715,10 @@ void xics_migrate_irqs_away(void)
WARN_ON(status < 0);
/* Allow IPIs again... */
ops->cppr_info(cpu, DEFAULT_PRIORITY);
iosync();
xics_set_cpu_priority(cpu, DEFAULT_PRIORITY);
for_each_irq(virq) {
irq_desc_t *desc;
struct irq_desc *desc;
int xics_status[2];
unsigned long flags;