[MIPS] Consolidate all variants of MIPS cp0 timer interrupt handlers.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
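For orientation before the hunks: each platform below used to carry its own cp0 count/compare handler (mips_timer_interrupt on Alchemy, indy_r4k_timer_interrupt on IP22, sim_timer_interrupt on the simulator, the open-coded cpu == 0 split on the SiByte parts), and they all now funnel into the generic ll_timer_interrupt(). What follows is a minimal userspace C model of the new call flow, not kernel code: the function names, the CAUSEF_IP7 test and the au1000 irq number 63 come from the diff, the two-argument form follows the time.h prototype in the last hunk, and the stub bodies and the cpu variable are placeholders.

        #include <stdio.h>

        #define CAUSEF_IP7      (1 << 15)       /* cp0 Cause bit for hardware interrupt 7 */

        static int cpu;                         /* stands in for smp_processor_id() */

        static void timer_interrupt(int irq, void *dev_id)
        {
                printf("global tick, irq %d\n", irq);   /* do_timer() etc. in the kernel */
        }

        static void local_timer_interrupt(int irq, void *dev_id)
        {
                printf("per-CPU accounting, irq %d\n", irq);
        }

        /* The consolidated handler; mirrors the non-SMTC branch of the new code. */
        static void ll_timer_interrupt(int irq, void *dev_id)
        {
                if (cpu == 0)
                        timer_interrupt(irq, NULL);             /* CPU 0 runs the global tick */
                else
                        local_timer_interrupt(irq, dev_id);     /* others only profile/account */
        }

        /* A platform dispatcher in the style of the au1000 plat_irq_dispatch() hunk. */
        static void plat_irq_dispatch(unsigned int pending)
        {
                if (pending & CAUSEF_IP7)
                        ll_timer_interrupt(63, NULL);   /* 63 is the au1000 timer irq */
        }

        int main(void)
        {
                plat_irq_dispatch(CAUSEF_IP7);
                return 0;
        }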
@@ -65,8 +65,6 @@
 #define EXT_INTC1_REQ1 5 /* IP 5 */
 #define MIPS_TIMER_IP 7 /* IP 7 */
 
-extern void mips_timer_interrupt(void);
-
 void (*board_init_irq)(void);
 
 static DEFINE_SPINLOCK(irq_lock);
@@ -635,7 +633,7 @@ asmlinkage void plat_irq_dispatch(void)
        unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
 
        if (pending & CAUSEF_IP7)
-               mips_timer_interrupt();
+               ll_timer_interrupt(63);
        else if (pending & CAUSEF_IP2)
                intc0_req0_irqdispatch();
        else if (pending & CAUSEF_IP3)
@@ -64,48 +64,8 @@ static unsigned long last_pc0, last_match20;
 
 static DEFINE_SPINLOCK(time_lock);
 
-static inline void ack_r4ktimer(unsigned long newval)
-{
-       write_c0_compare(newval);
-}
-
-/*
- * There are a lot of conceptually broken versions of the MIPS timer interrupt
- * handler floating around. This one is rather different, but the algorithm
- * is provably more robust.
- */
 unsigned long wtimer;
 
-void mips_timer_interrupt(void)
-{
-       int irq = 63;
-
-       irq_enter();
-       kstat_this_cpu.irqs[irq]++;
-
-       if (r4k_offset == 0)
-               goto null;
-
-       do {
-               kstat_this_cpu.irqs[irq]++;
-               do_timer(1);
-#ifndef CONFIG_SMP
-               update_process_times(user_mode(get_irq_regs()));
-#endif
-               r4k_cur += r4k_offset;
-               ack_r4ktimer(r4k_cur);
-
-       } while (((unsigned long)read_c0_count()
-               - r4k_cur) < 0x7fffffff);
-
-       irq_exit();
-       return;
-
-null:
-       ack_r4ktimer(0);
-       irq_exit();
-}
-
 #ifdef CONFIG_PM
 irqreturn_t counter0_irq(int irq, void *dev_id)
 {
@@ -867,7 +867,7 @@ void ipi_decode(struct smtc_ipi *pipi)
 #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
                clock_hang_reported[dest_copy] = 0;
 #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
-               local_timer_interrupt(0, NULL);
+               local_timer_interrupt(0);
                irq_exit();
                break;
        case LINUX_SMP_IPI:
@@ -144,7 +144,7 @@ void local_timer_interrupt(int irq, void *dev_id)
  * High-level timer interrupt service routines. This function
  * is set as irqaction->handler and is invoked through do_IRQ.
  */
-irqreturn_t timer_interrupt(int irq, void *dev_id)
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
        write_seqlock(&xtime_lock);
 
@@ -174,9 +174,10 @@ int null_perf_irq(void)
        return 0;
 }
 
+EXPORT_SYMBOL(null_perf_irq);
+
 int (*perf_irq)(void) = null_perf_irq;
 
-EXPORT_SYMBOL(null_perf_irq);
 EXPORT_SYMBOL(perf_irq);
 
 /*
@@ -208,35 +209,79 @@ static inline int handle_perf_irq (int r2)
                !r2;
 }
 
-asmlinkage void ll_timer_interrupt(int irq)
+void ll_timer_interrupt(int irq, void *dev_id)
 {
+       int cpu = smp_processor_id();
+
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * In an SMTC system, one Count/Compare set exists per VPE.
+        * Which TC within a VPE gets the interrupt is essentially
+        * random - we only know that it shouldn't be one with
+        * IXMT set. Whichever TC gets the interrupt needs to
+        * send special interprocessor interrupts to the other
+        * TCs to make sure that they schedule, etc.
+        *
+        * That code is specific to the SMTC kernel, not to
+        * the a particular platform, so it's invoked from
+        * the general MIPS timer_interrupt routine.
+        */
+
+       /*
+        * We could be here due to timer interrupt,
+        * perf counter overflow, or both.
+        */
+       (void) handle_perf_irq(1);
+
+       if (read_c0_cause() & (1 << 30)) {
+               /*
+                * There are things we only want to do once per tick
+                * in an "MP" system. One TC of each VPE will take
+                * the actual timer interrupt. The others will get
+                * timer broadcast IPIs. We use whoever it is that takes
+                * the tick on VPE 0 to run the full timer_interrupt().
+                */
+               if (cpu_data[cpu].vpe_id == 0) {
+                       timer_interrupt(irq, NULL);
+               } else {
+                       write_c0_compare(read_c0_count() +
+                                        (mips_hpt_frequency/HZ));
+                       local_timer_interrupt(irq, dev_id);
+               }
+               smtc_timer_broadcast(cpu_data[cpu].vpe_id);
+       }
+#else /* CONFIG_MIPS_MT_SMTC */
        int r2 = cpu_has_mips_r2;
 
-       irq_enter();
-       kstat_this_cpu.irqs[irq]++;
-
        if (handle_perf_irq(r2))
-               goto out;
+               return;
 
        if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
-               goto out;
+               return;
 
+       if (cpu == 0) {
+               /*
+                * CPU 0 handles the global timer interrupt job and process
+                * accounting resets count/compare registers to trigger next
+                * timer int.
+                */
                timer_interrupt(irq, NULL);
+       } else {
+               /* Everyone else needs to reset the timer int here as
+                  ll_local_timer_interrupt doesn't */
+               /*
+                * FIXME: need to cope with counter underflow.
+                * More support needs to be added to kernel/time for
+                * counter/timer interrupts on multiple CPU's
+                */
+               write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
 
-out:
-       irq_exit();
-}
-
-asmlinkage void ll_local_timer_interrupt(int irq)
-{
-       irq_enter();
-       if (smp_processor_id() != 0)
-               kstat_this_cpu.irqs[irq]++;
-
-       /* we keep interrupt disabled all the time */
-       local_timer_interrupt(irq, NULL);
-
-       irq_exit();
+               /*
+                * Other CPUs should do profiling and process accounting
+                */
+               local_timer_interrupt(irq, dev_id);
+       }
+#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /*
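A note on the (1 << 30) test in the hunk above: on MIPS32/64 R2 cores, bit 30 of the cp0 Cause register is TI, the timer-interrupt-pending flag, which is why the code only trusts the test when cpu_has_mips_r2 and otherwise assumes the timer fired. A small hedged helper naming the bit; read_c0_cause() is the kernel's real mfc0-based accessor, everything else here is illustration:

        #include <stdint.h>

        #define CAUSEF_TI       (1u << 30)      /* Cause.TI: count/compare interrupt pending (MIPS R2) */

        /* Stub; the kernel reads the real register via read_c0_cause(). */
        static uint32_t fake_cause = CAUSEF_TI;

        static uint32_t read_c0_cause(void)
        {
                return fake_cause;
        }

        /* Non-zero when the count/compare timer raised the current interrupt. */
        static int timer_irq_pending(void)
        {
                return (read_c0_cause() & CAUSEF_TI) != 0;
        }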
@@ -67,108 +67,6 @@ static void mips_perf_dispatch(void)
        do_IRQ(cp0_perfcount_irq);
 }
 
-/*
- * Redeclare until I get around mopping the timer code insanity on MIPS.
- */
-extern int null_perf_irq(void);
-
-extern int (*perf_irq)(void);
-
-/*
- * Possibly handle a performance counter interrupt.
- * Return true if the timer interrupt should not be checked
- */
-static inline int handle_perf_irq (int r2)
-{
-       /*
-        * The performance counter overflow interrupt may be shared with the
-        * timer interrupt (cp0_perfcount_irq < 0). If it is and a
-        * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
-        * and we can't reliably determine if a counter interrupt has also
-        * happened (!r2) then don't check for a timer interrupt.
-        */
-       return (cp0_perfcount_irq < 0) &&
-               perf_irq() == IRQ_HANDLED &&
-               !r2;
-}
-
-irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
-{
-       int cpu = smp_processor_id();
-
-#ifdef CONFIG_MIPS_MT_SMTC
-       /*
-        * In an SMTC system, one Count/Compare set exists per VPE.
-        * Which TC within a VPE gets the interrupt is essentially
-        * random - we only know that it shouldn't be one with
-        * IXMT set. Whichever TC gets the interrupt needs to
-        * send special interprocessor interrupts to the other
-        * TCs to make sure that they schedule, etc.
-        *
-        * That code is specific to the SMTC kernel, not to
-        * the a particular platform, so it's invoked from
-        * the general MIPS timer_interrupt routine.
-        */
-
-       /*
-        * We could be here due to timer interrupt,
-        * perf counter overflow, or both.
-        */
-       (void) handle_perf_irq(1);
-
-       if (read_c0_cause() & (1 << 30)) {
-               /*
-                * There are things we only want to do once per tick
-                * in an "MP" system. One TC of each VPE will take
-                * the actual timer interrupt. The others will get
-                * timer broadcast IPIs. We use whoever it is that takes
-                * the tick on VPE 0 to run the full timer_interrupt().
-                */
-               if (cpu_data[cpu].vpe_id == 0) {
-                       timer_interrupt(irq, NULL);
-               } else {
-                       write_c0_compare(read_c0_count() +
-                                        (mips_hpt_frequency/HZ));
-                       local_timer_interrupt(irq, dev_id);
-               }
-               smtc_timer_broadcast();
-       }
-#else /* CONFIG_MIPS_MT_SMTC */
-       int r2 = cpu_has_mips_r2;
-
-       if (handle_perf_irq(r2))
-               goto out;
-
-       if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
-               goto out;
-
-       if (cpu == 0) {
-               /*
-                * CPU 0 handles the global timer interrupt job and process
-                * accounting resets count/compare registers to trigger next
-                * timer int.
-                */
-               timer_interrupt(irq, NULL);
-       } else {
-               /* Everyone else needs to reset the timer int here as
-                  ll_local_timer_interrupt doesn't */
-               /*
-                * FIXME: need to cope with counter underflow.
-                * More support needs to be added to kernel/time for
-                * counter/timer interrupts on multiple CPU's
-                */
-               write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
-
-               /*
-                * Other CPUs should do profiling and process accounting
-                */
-               local_timer_interrupt(irq, dev_id);
-       }
-out:
-#endif /* CONFIG_MIPS_MT_SMTC */
-       return IRQ_HANDLED;
-}
-
 /*
  * Estimate CPU frequency. Sets mips_hpt_frequency as a side-effect
  */
@@ -246,7 +144,7 @@ void __init plat_time_init(void)
        mips_scroll_message();
 }
 
-irqreturn_t mips_perf_interrupt(int irq, void *dev_id)
+static irqreturn_t mips_perf_interrupt(int irq, void *dev_id)
 {
        return perf_irq();
 }
@@ -257,8 +155,10 @@ static struct irqaction perf_irqaction = {
        .name = "performance",
 };
 
-void __init plat_perf_setup(struct irqaction *irq)
+void __init plat_perf_setup(void)
 {
+       struct irqaction *irq = &perf_irqaction;
+
        cp0_perfcount_irq = -1;
 
 #ifdef MSC01E_INT_BASE
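The plat_perf_setup() change just above is an encapsulation fix: the irqaction it fills in becomes file-local state instead of a caller-supplied parameter (the plat_timer_setup() hunk further down drops the matching &perf_irqaction argument). A hedged sketch of the idiom outside the kernel; the two-field irqaction is a pared-down stand-in for the real structure:

        /* Pared-down model of the 2.6.2x-era struct irqaction; the real one has more fields. */
        struct irqaction {
                int (*handler)(int irq, void *dev_id);
                const char *name;
        };

        static int mips_perf_interrupt(int irq, void *dev_id)
        {
                return 0;       /* would chain to perf_irq() in the kernel */
        }

        static struct irqaction perf_irqaction = {
                .handler = mips_perf_interrupt,
                .name    = "performance",
        };

        void plat_perf_setup(void)
        {
                struct irqaction *irq = &perf_irqaction;        /* was a parameter before */

                /* register *irq with the interrupt layer here */
                (void) irq;
        }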
@@ -297,8 +197,6 @@ void __init plat_timer_setup(struct irqaction *irq)
                mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
        }
 
-       /* we are using the cpu counter for timer interrupts */
-       irq->handler = mips_timer_interrupt;    /* we use our own handler */
 #ifdef CONFIG_MIPS_MT_SMTC
        setup_irq_smtc(mips_cpu_timer_irq, irq, 0x100 << cp0_compare_irq);
 #else
@@ -308,5 +206,5 @@ void __init plat_timer_setup(struct irqaction *irq)
        set_irq_handler(mips_cpu_timer_irq, handle_percpu_irq);
 #endif
 
-       plat_perf_setup(&perf_irqaction);
+       plat_perf_setup();
 }
@@ -23,77 +23,6 @@
 
 unsigned long cpu_khz;
 
-irqreturn_t sim_timer_interrupt(int irq, void *dev_id)
-{
-#ifdef CONFIG_SMP
-       int cpu = smp_processor_id();
-
-       /*
-        * CPU 0 handles the global timer interrupt job
-        * resets count/compare registers to trigger next timer int.
-        */
-#ifndef CONFIG_MIPS_MT_SMTC
-       if (cpu == 0) {
-               timer_interrupt(irq, dev_id);
-       } else {
-               /* Everyone else needs to reset the timer int here as
-                  ll_local_timer_interrupt doesn't */
-               /*
-                * FIXME: need to cope with counter underflow.
-                * More support needs to be added to kernel/time for
-                * counter/timer interrupts on multiple CPU's
-                */
-               write_c0_compare (read_c0_count() + ( mips_hpt_frequency/HZ));
-       }
-#else /* SMTC */
-       /*
-        * In SMTC system, one Count/Compare set exists per VPE.
-        * Which TC within a VPE gets the interrupt is essentially
-        * random - we only know that it shouldn't be one with
-        * IXMT set. Whichever TC gets the interrupt needs to
-        * send special interprocessor interrupts to the other
-        * TCs to make sure that they schedule, etc.
-        *
-        * That code is specific to the SMTC kernel, not to
-        * the simulation platform, so it's invoked from
-        * the general MIPS timer_interrupt routine.
-        *
-        * We have a problem in that the interrupt vector code
-        * had to turn off the timer IM bit to avoid redundant
-        * entries, but we may never get to mips_cpu_irq_end
-        * to turn it back on again if the scheduler gets
-        * involved. So we clear the pending timer here,
-        * and re-enable the mask...
-        */
-
-       int vpflags = dvpe();
-       write_c0_compare (read_c0_count() - 1);
-       clear_c0_cause(0x100 << cp0_compare_irq);
-       set_c0_status(0x100 << cp0_compare_irq);
-       irq_enable_hazard();
-       evpe(vpflags);
-
-       if (cpu_data[cpu].vpe_id == 0)
-               timer_interrupt(irq, dev_id);
-       else
-               write_c0_compare (read_c0_count() + ( mips_hpt_frequency/HZ));
-       smtc_timer_broadcast(cpu_data[cpu].vpe_id);
-
-#endif /* CONFIG_MIPS_MT_SMTC */
-
-       /*
-        * every CPU should do profiling and process accounting
-        */
-       local_timer_interrupt (irq, dev_id);
-
-       return IRQ_HANDLED;
-#else
-       return timer_interrupt (irq, dev_id);
-#endif
-}
-
-
-
 /*
  * Estimate CPU frequency. Sets mips_hpt_frequency as a side-effect
  */
@@ -20,10 +20,10 @@
 #include <asm/mipsregs.h>
 #include <asm/addrspace.h>
 #include <asm/irq_cpu.h>
 
 #include <asm/sgi/ioc.h>
 #include <asm/sgi/hpc3.h>
 #include <asm/sgi/ip22.h>
-
+#include <asm/time.h>
 
 /* #define DEBUG_SGINT */
@@ -204,7 +204,6 @@ static struct irqaction map1_cascade = {
 #define SGI_INTERRUPTS SGINT_LOCAL3
 #endif
 
-extern void indy_r4k_timer_interrupt(void);
 extern void indy_8254timer_irq(void);
 
 /*
@@ -243,7 +242,7 @@ asmlinkage void plat_irq_dispatch(void)
         * First we check for r4k counter/timer IRQ.
         */
        if (pending & CAUSEF_IP7)
-               indy_r4k_timer_interrupt();
+               ll_timer_interrupt(SGI_TIMER_IRQ, NULL);
        else if (pending & CAUSEF_IP2)
                indy_local0_irqdispatch();
        else if (pending & CAUSEF_IP3)
@@ -189,16 +189,6 @@ void indy_8254timer_irq(void)
        irq_exit();
 }
 
-void indy_r4k_timer_interrupt(void)
-{
-       int irq = SGI_TIMER_IRQ;
-
-       irq_enter();
-       kstat_this_cpu.irqs[irq]++;
-       timer_interrupt(irq, NULL);
-       irq_exit();
-}
-
 void __init plat_timer_setup(struct irqaction *irq)
 {
        /* over-write the handler, we use our own way */
@@ -103,18 +103,7 @@ void bcm1480_timer_interrupt(void)
        __raw_writeq(M_SCD_TIMER_ENABLE|M_SCD_TIMER_MODE_CONTINUOUS,
                     IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)));
 
-       if (cpu == 0) {
-               /*
-                * CPU 0 handles the global timer interrupt job
-                */
-               ll_timer_interrupt(irq);
-       }
-       else {
-               /*
-                * other CPUs should just do profiling and process accounting
-                */
-               ll_local_timer_interrupt(irq);
-       }
+       ll_timer_interrupt(irq);
 }
 
 static cycle_t bcm1480_hpt_read(void)
@@ -125,18 +125,7 @@ void sb1250_timer_interrupt(void)
        ____raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
                       IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)));
 
-       if (cpu == 0) {
-               /*
-                * CPU 0 handles the global timer interrupt job
-                */
-               ll_timer_interrupt(irq);
-       }
-       else {
-               /*
-                * other CPUs should just do profiling and process accounting
-                */
-               ll_local_timer_interrupt(irq);
-       }
+       ll_timer_interrupt(irq);
 }
 
 /*
@@ -49,20 +49,14 @@ extern void (*mips_timer_ack)(void);
 extern struct clocksource clocksource_mips;
 
 /*
- * high-level timer interrupt routines.
+ * The low-level timer interrupt routine.
  */
-extern irqreturn_t timer_interrupt(int irq, void *dev_id);
+extern void ll_timer_interrupt(int irq, void *dev_id);
 
-/*
- * the corresponding low-level timer interrupt routine.
- */
-extern asmlinkage void ll_timer_interrupt(int irq);
-
 /*
  * profiling and process accouting is done separately in local_timer_interrupt
  */
 extern void local_timer_interrupt(int irq, void *dev_id);
-extern asmlinkage void ll_local_timer_interrupt(int irq);
 
 /*
  * board specific routines required by time_init().
@@ -78,4 +72,10 @@ extern void plat_timer_setup(struct irqaction *irq);
  */
 extern unsigned int mips_hpt_frequency;
 
+/*
+ * The performance counter IRQ on MIPS is a close relative to the timer IRQ
+ * so it lives here.
+ */
+extern int (*perf_irq)(void);
+
 #endif /* _ASM_TIME_H */
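The perf_irq pointer exported in the final hunk is the extension point the consolidation keeps open: a profiler can swap in its own counter-overflow handler and handle_perf_irq() will call it on every shared timer/counter interrupt. A hedged sketch of the override pattern; null_perf_irq and perf_irq match the diff, while my_perf_handler, its body, and the literal 1/0 return codes (standing for IRQ_HANDLED/IRQ_NONE) are illustrative:

        /* Defaults, as set up in the EXPORT_SYMBOL hunk earlier. */
        int null_perf_irq(void)
        {
                return 0;               /* IRQ_NONE: nothing handled */
        }

        int (*perf_irq)(void) = null_perf_irq;

        /* Hypothetical profiler swapping in its own overflow handler. */
        static int my_perf_handler(void)
        {
                /* read the counters, rearm them, feed samples to the profiler */
                return 1;               /* IRQ_HANDLED */
        }

        void my_profiler_init(void)
        {
                perf_irq = my_perf_handler;
        }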