x86: janitor stack overflow warning patch

Add KERN_WARNING to the printk as this could not be done in the
original patch, which allegedly only moves code around.

Un#ifdef do_IRQ.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Commit:    de9b10af12 (parent 04b361abfd)
Author:    Thomas Gleixner, 2008-05-05 15:58:15 +02:00
Committer: Ingo Molnar
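The check_stack_overflow() helper added below relies on the 32-bit kernel stack layout: the stack and its struct thread_info share one THREAD_SIZE-aligned block, with thread_info at the bottom and the stack growing down towards it, so masking %esp with THREAD_SIZE - 1 yields how many bytes are still free above thread_info. A minimal stand-alone sketch of that arithmetic (the constants below are illustrative stand-ins, not taken from the kernel headers):

#include <stdio.h>

/* Illustrative stand-ins; the kernel gets these from its own headers and
 * THREAD_SIZE depends on CONFIG_4KSTACKS. */
#define THREAD_SIZE		8192UL			/* size of the stack block */
#define STACK_WARN		(THREAD_SIZE / 8)	/* warn below ~1KB free */
#define THREAD_INFO_SIZE	64UL			/* stand-in for sizeof(struct thread_info) */

/*
 * sp & (THREAD_SIZE - 1) is the offset of the stack pointer inside the
 * THREAD_SIZE-aligned block, i.e. the bytes left before the stack would
 * overwrite thread_info at the bottom of the block.
 */
static int check_stack_overflow(unsigned long sp)
{
	unsigned long bytes_free = sp & (THREAD_SIZE - 1);

	return bytes_free < (THREAD_INFO_SIZE + STACK_WARN);
}

int main(void)
{
	unsigned long stack_base = 0xc0400000UL;	/* THREAD_SIZE-aligned */

	printf("%d\n", check_stack_overflow(stack_base + 300));	/* 1: would warn */
	printf("%d\n", check_stack_overflow(stack_base + 4000));	/* 0: plenty left */
	return 0;
}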

@@ -48,6 +48,29 @@ void ack_bad_irq(unsigned int irq)
 #endif
 }
 
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+/* Debugging check for stack overflow: is there less than 1KB free? */
+static int check_stack_overflow(void)
+{
+	long sp;
+
+	__asm__ __volatile__("andl %%esp,%0" :
+			     "=r" (sp) : "0" (THREAD_SIZE - 1));
+
+	return sp < (sizeof(struct thread_info) + STACK_WARN);
+}
+
+static void print_stack_overflow(void)
+{
+	printk(KERN_WARNING "low stack detected by irq handler\n");
+	dump_stack();
+}
+
+#else
+static inline int check_stack_overflow(void) { return 0; }
+static inline void print_stack_overflow(void) { }
+#endif
+
 #ifdef CONFIG_4KSTACKS
 /*
  * per-CPU IRQ handling contexts (thread information and stack)
@@ -59,18 +82,12 @@ union irq_ctx {
 static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
 static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
-#endif
 
-static void stack_overflow(void)
-{
-	printk("low stack detected by irq handler\n");
-	dump_stack();
-}
-
-static inline void call_on_stack2(void *func, void *stack,
-		unsigned long arg1, unsigned long arg2)
+static inline void call_on_stack(void *func, void *stack,
+			 unsigned long arg1, void *arg2)
 {
 	unsigned long bx;
+
 	asm volatile(
 		"       xchgl  %%ebx,%%esp      \n"
 		"       call   *%%edi           \n"
@@ -81,44 +98,11 @@ static inline void call_on_stack2(void *func, void *stack,
: "memory", "cc", "ecx"); : "memory", "cc", "ecx");
} }
/* static inline int
* do_IRQ handles all normal device IRQ's (the special execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
* SMP cross-CPU interrupts have their own specific {
* handlers).
*/
unsigned int do_IRQ(struct pt_regs *regs)
{
struct pt_regs *old_regs;
/* high bit used in ret_from_ code */
int irq = ~regs->orig_ax;
struct irq_desc *desc = irq_desc + irq;
#ifdef CONFIG_4KSTACKS
union irq_ctx *curctx, *irqctx; union irq_ctx *curctx, *irqctx;
u32 *isp; u32 *isp;
#endif
int overflow = 0;
if (unlikely((unsigned)irq >= NR_IRQS)) {
printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
__func__, irq);
BUG();
}
old_regs = set_irq_regs(regs);
irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
{
long sp;
__asm__ __volatile__("andl %%esp,%0" :
"=r" (sp) : "0" (THREAD_SIZE - 1));
if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN)))
overflow = 1;
}
#endif
#ifdef CONFIG_4KSTACKS
curctx = (union irq_ctx *) current_thread_info(); curctx = (union irq_ctx *) current_thread_info();
irqctx = hardirq_ctx[smp_processor_id()]; irqctx = hardirq_ctx[smp_processor_id()];
@@ -129,31 +113,61 @@ unsigned int do_IRQ(struct pt_regs *regs)
 	 * handler) we can't do that and just have to keep using the
 	 * current stack (which is the irq stack already after all)
 	 */
-	if (curctx != irqctx) {
-		/* build the stack frame on the IRQ stack */
-		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
-		irqctx->tinfo.task = curctx->tinfo.task;
-		irqctx->tinfo.previous_esp = current_stack_pointer;
-
-		/*
-		 * Copy the softirq bits in preempt_count so that the
-		 * softirq checks work in the hardirq context.
-		 */
-		irqctx->tinfo.preempt_count =
-			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
-			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
-
-		/* Execute warning on interrupt stack */
-		if (unlikely(overflow))
-			call_on_stack2(stack_overflow, isp, 0, 0);
-
-		call_on_stack2(desc->handle_irq, isp, irq, (unsigned long)desc);
-	} else
+	if (unlikely(curctx == irqctx))
+		return 0;
+
+	/* build the stack frame on the IRQ stack */
+	isp = (u32 *) ((char*)irqctx + sizeof(*irqctx));
+	irqctx->tinfo.task = curctx->tinfo.task;
+	irqctx->tinfo.previous_esp = current_stack_pointer;
+
+	/*
+	 * Copy the softirq bits in preempt_count so that the
+	 * softirq checks work in the hardirq context.
+	 */
+	irqctx->tinfo.preempt_count =
+		(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+		(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+
+	if (unlikely(overflow))
+		call_on_stack(print_stack_overflow, isp, 0, NULL);
+
+	call_on_stack(desc->handle_irq, isp, irq, desc);
+
+	return 1;
+}
+
+#else
+static inline int
+execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
 #endif
-	{
-		/* AK: Slightly bogus here */
-		if (overflow)
-			stack_overflow();
+
+/*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+unsigned int do_IRQ(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs;
+	/* high bit used in ret_from_ code */
+	int overflow, irq = ~regs->orig_ax;
+	struct irq_desc *desc = irq_desc + irq;
+
+	if (unlikely((unsigned)irq >= NR_IRQS)) {
+		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
+					__func__, irq);
+		BUG();
+	}
+
+	old_regs = set_irq_regs(regs);
+	irq_enter();
+
+	overflow = check_stack_overflow();
+
+	if (!execute_on_irq_stack(overflow, desc, irq)) {
+		if (unlikely(overflow))
+			print_stack_overflow();
 		desc->handle_irq(irq, desc);
 	}
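After this patch do_IRQ() itself is no longer #ifdef'ed: it always computes the overflow flag via check_stack_overflow() and tries execute_on_irq_stack(); only when that returns 0 (no CONFIG_4KSTACKS, or already running on the IRQ stack) are the warning and the handler run on the current stack. A rough, compilable control-flow sketch with the kernel pieces stubbed out (the helpers here are stand-ins, not the real implementations):

#include <stdio.h>

/* Stand-ins for the kernel helpers of the same name. */
static int check_stack_overflow(void)
{
	return 0;				/* pretend the stack is fine */
}

static void print_stack_overflow(void)
{
	printf("low stack detected by irq handler\n");
}

static void handle_irq(int irq)		/* stands in for desc->handle_irq(irq, desc) */
{
	printf("handling irq %d\n", irq);
}

/*
 * With CONFIG_4KSTACKS the real function switches to the per-CPU hardirq
 * stack, prints the warning there if requested, runs the handler and
 * returns 1; otherwise (or when already on the IRQ stack) it returns 0.
 */
static int execute_on_irq_stack(int overflow, int irq)
{
	(void)overflow;
	(void)irq;
	return 0;				/* model the fallback path */
}

static void do_IRQ(int irq)
{
	int overflow = check_stack_overflow();

	if (!execute_on_irq_stack(overflow, irq)) {
		if (overflow)
			print_stack_overflow();
		handle_irq(irq);
	}
}

int main(void)
{
	do_IRQ(7);
	return 0;
}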