HUB interrupts are allocated per node, not per slice. Make manipulation
of the interrupt mask register atomic by disabling interrupts.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
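For reference, the interrupt-enable path as it reads after this change, assembled from the intr_connect_level() hunks below (a condensed re-assembly of lines already in the diff, not new code): the per-slice enable mask is updated with an atomic set_bit(), and the read-modify-write of the HUB interrupt mask register is bracketed by local_irq_save()/local_irq_restore() so an interrupt on the same CPU cannot interleave with the register update.

static int intr_connect_level(int cpu, int bit)
{
	nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
	struct slice_data *si = cpu_data[cpu].data;
	unsigned long flags;

	/* atomic bitop: the shared enable mask must not be updated with
	   the non-atomic __set_bit() any more */
	set_bit(bit, si->irq_enable_mask);

	/* no local interrupt may run while the mask register is rewritten */
	local_irq_save(flags);
	if (!cputoslice(cpu)) {
		REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
		REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
	} else {
		REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
		REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
	}
	local_irq_restore(flags);

	return 0;
}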
@@ -56,12 +56,12 @@ static void __init per_hub_init(cnodeid_t cnode)
 {
 	struct hub_data *hub = hub_data(cnode);
 	nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
+	int i;
 
 	cpu_set(smp_processor_id(), hub->h_cpus);
 
 	if (test_and_set_bit(cnode, hub_init_mask))
 		return;
-
 	/*
 	 * Set CRB timeout at 5ms, (< PI timeout of 10ms)
 	 */
@@ -88,6 +88,24 @@ static void __init per_hub_init(cnodeid_t cnode)
 		__flush_cache_all();
 	}
 #endif
+
+	/*
+	 * Some interrupts are reserved by hardware or by software convention.
+	 * Mark these as reserved right away so they won't be used accidently
+	 * later.
+	 */
+	for (i = 0; i <= BASE_PCI_IRQ; i++) {
+		__set_bit(i, hub->irq_alloc_mask);
+		LOCAL_HUB_CLR_INTR(INT_PEND0_BASELVL + i);
+	}
+
+	__set_bit(IP_PEND0_6_63, hub->irq_alloc_mask);
+	LOCAL_HUB_S(PI_INT_PEND_MOD, IP_PEND0_6_63);
+
+	for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++) {
+		__set_bit(i, hub->irq_alloc_mask);
+		LOCAL_HUB_CLR_INTR(INT_PEND1_BASELVL + i);
+	}
 }
 
 void __init per_cpu_init(void)
@@ -104,29 +122,11 @@ void __init per_cpu_init(void)
 
 	clear_c0_status(ST0_IM);
 
+	per_hub_init(cnode);
+
 	for (i = 0; i < LEVELS_PER_SLICE; i++)
 		si->level_to_irq[i] = -1;
 
-	/*
-	 * Some interrupts are reserved by hardware or by software convention.
-	 * Mark these as reserved right away so they won't be used accidently
-	 * later.
-	 */
-	for (i = 0; i <= BASE_PCI_IRQ; i++) {
-		__set_bit(i, si->irq_alloc_mask);
-		LOCAL_HUB_S(PI_INT_PEND_MOD, i);
-	}
-
-	__set_bit(IP_PEND0_6_63, si->irq_alloc_mask);
-	LOCAL_HUB_S(PI_INT_PEND_MOD, IP_PEND0_6_63);
-
-	for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++) {
-		__set_bit(i, si->irq_alloc_mask + 1);
-		LOCAL_HUB_S(PI_INT_PEND_MOD, i);
-	}
-
-	LOCAL_HUB_L(PI_INT_PEND0);
-
 	/*
 	 * We use this so we can find the local hub's data as fast as only
 	 * possible.
@@ -140,8 +140,6 @@ void __init per_cpu_init(void)
 	install_cpu_nmi_handler(cputoslice(cpu));
 
 	set_c0_status(SRB_DEV0 | SRB_DEV1);
-
-	per_hub_init(cnode);
 }
 
 /*
@@ -74,14 +74,15 @@ extern int irq_to_slot[];
 
 static inline int alloc_level(int cpu, int irq)
 {
+	struct hub_data *hub = hub_data(cpu_to_node(cpu));
 	struct slice_data *si = cpu_data[cpu].data;
-	int level;	/* pre-allocated entries */
+	int level;
 
-	level = find_first_zero_bit(si->irq_alloc_mask, LEVELS_PER_SLICE);
+	level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE);
 	if (level >= LEVELS_PER_SLICE)
 		panic("Cpu %d flooded with devices\n", cpu);
 
-	__set_bit(level, si->irq_alloc_mask);
+	__set_bit(level, hub->irq_alloc_mask);
 	si->level_to_irq[level] = irq;
 
 	return level;
@@ -216,9 +217,11 @@ static int intr_connect_level(int cpu, int bit)
 {
 	nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
 	struct slice_data *si = cpu_data[cpu].data;
+	unsigned long flags;
 
-	__set_bit(bit, si->irq_enable_mask);
+	set_bit(bit, si->irq_enable_mask);
 
+	local_irq_save(flags);
 	if (!cputoslice(cpu)) {
 		REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
 		REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
@@ -226,6 +229,7 @@ static int intr_connect_level(int cpu, int bit)
 		REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
 		REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
 	}
+	local_irq_restore(flags);
 
 	return 0;
 }
@@ -235,7 +239,7 @@ static int intr_disconnect_level(int cpu, int bit)
 	nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
 	struct slice_data *si = cpu_data[cpu].data;
 
-	__clear_bit(bit, si->irq_enable_mask);
+	clear_bit(bit, si->irq_enable_mask);
 
 	if (!cputoslice(cpu)) {
 		REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
@@ -298,6 +302,7 @@ static unsigned int startup_bridge_irq(unsigned int irq)
 static void shutdown_bridge_irq(unsigned int irq)
 {
 	struct bridge_controller *bc = IRQ_TO_BRIDGE(irq);
+	struct hub_data *hub = hub_data(cpu_to_node(bc->irq_cpu));
 	bridge_t *bridge = bc->base;
 	struct slice_data *si = cpu_data[bc->irq_cpu].data;
 	int pin, swlevel;
@@ -313,7 +318,7 @@ static void shutdown_bridge_irq(unsigned int irq)
 	swlevel = find_level(&cpu, irq);
 	intr_disconnect_level(cpu, swlevel);
 
-	__clear_bit(swlevel, si->irq_alloc_mask);
+	__clear_bit(swlevel, hub->irq_alloc_mask);
 	si->level_to_irq[swlevel] = -1;
 
 	bridge->b_int_enable &= ~(1 << pin);
@@ -433,25 +438,24 @@ void install_ipi(void)
 	int slice = LOCAL_HUB_L(PI_CPU_NUM);
 	int cpu = smp_processor_id();
 	struct slice_data *si = cpu_data[cpu].data;
-	hubreg_t mask, set;
+	struct hub_data *hub = hub_data(cpu_to_node(cpu));
+	int resched, call;
 
+	resched = CPU_RESCHED_A_IRQ + slice;
+	__set_bit(resched, hub->irq_alloc_mask);
+	__set_bit(resched, si->irq_enable_mask);
+	LOCAL_HUB_CLR_INTR(resched);
+
+	call = CPU_CALL_A_IRQ + slice;
+	__set_bit(call, hub->irq_alloc_mask);
+	__set_bit(call, si->irq_enable_mask);
+	LOCAL_HUB_CLR_INTR(call);
+
 	if (slice == 0) {
-		LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
-		LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
-		mask = LOCAL_HUB_L(PI_INT_MASK0_A);	/* Slice A */
-		set = (1UL << CPU_RESCHED_A_IRQ) | (1UL << CPU_CALL_A_IRQ);
-		mask |= set;
-		si->irq_enable_mask[0] |= set;
-		si->irq_alloc_mask[0] |= set;
-		LOCAL_HUB_S(PI_INT_MASK0_A, mask);
+		LOCAL_HUB_S(PI_INT_MASK0_A, si->irq_enable_mask[0]);
+		LOCAL_HUB_S(PI_INT_MASK1_A, si->irq_enable_mask[1]);
 	} else {
-		LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
-		LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
-		mask = LOCAL_HUB_L(PI_INT_MASK0_B);	/* Slice B */
-		set = (1UL << CPU_RESCHED_B_IRQ) | (1UL << CPU_CALL_B_IRQ);
-		mask |= set;
-		si->irq_enable_mask[1] |= set;
-		si->irq_alloc_mask[1] |= set;
-		LOCAL_HUB_S(PI_INT_MASK0_B, mask);
+		LOCAL_HUB_S(PI_INT_MASK0_B, si->irq_enable_mask[0]);
+		LOCAL_HUB_S(PI_INT_MASK1_B, si->irq_enable_mask[1]);
 	}
 }
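Read together, the alloc_level() hunk gives the post-patch function below; it shows the split described in the summary: the free-level search and the allocation bitmap now live in the per-node struct hub_data, while the level-to-IRQ mapping remains in the per-slice struct slice_data (again only a re-assembly of lines already present in the diff):

static inline int alloc_level(int cpu, int irq)
{
	struct hub_data *hub = hub_data(cpu_to_node(cpu));	/* per-node state */
	struct slice_data *si = cpu_data[cpu].data;		/* per-slice state */
	int level;

	/* interrupt levels are now allocated from the node-wide bitmap */
	level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE);
	if (level >= LEVELS_PER_SLICE)
		panic("Cpu %d flooded with devices\n", cpu);

	__set_bit(level, hub->irq_alloc_mask);
	si->level_to_irq[level] = irq;	/* the mapping stays per slice */

	return level;
}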