Merge branch 'core-debug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-debug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  debug lockups: Improve lockup detection, fix generic arch fallback
  debug lockups: Improve lockup detection
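The heart of the series is a compile-time capability handshake: each architecture that can dump backtraces on all CPUs exports arch_trigger_all_cpu_backtrace() and defines a macro of the same name, which generic code then probes with #ifdef. Below is a minimal, self-contained sketch of that pattern; the printf bodies are illustrative stand-ins, not the kernel implementations:

#include <stdbool.h>
#include <stdio.h>

/* "Arch header": provide the function, plus a same-named macro that
 * acts purely as a compile-time capability flag (stand-in names). */
void arch_trigger_all_cpu_backtrace(void);
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace

void arch_trigger_all_cpu_backtrace(void)
{
        printf("backtrace of all CPUs\n");      /* stand-in body */
}

/* "Generic header": wrap the arch hook if present, else report that
 * no support exists so the caller can pick another mechanism. */
#ifdef arch_trigger_all_cpu_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
        arch_trigger_all_cpu_backtrace();
        return true;
}
#else
static inline bool trigger_all_cpu_backtrace(void)
{
        return false;
}
#endif

int main(void)
{
        if (!trigger_all_cpu_backtrace())
                printf("no arch support; use a fallback\n");
        return 0;
}

The self-referential #define is a no-op at expansion time; it exists only so that #ifdef can see the architecture provided the function.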
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
@@ -89,8 +89,8 @@ static inline unsigned long get_softint(void)
         return retval;
 }

-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace

 extern void *hardirq_stack[NR_CPUS];
 extern void *softirq_stack[NR_CPUS];
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
@@ -251,7 +251,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
         }
 }

-void __trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(void)
 {
         struct thread_info *tp = current_thread_info();
         struct pt_regs *regs = get_irq_regs();
@@ -304,7 +304,7 @@ void __trigger_all_cpu_backtrace(void)

 static void sysrq_handle_globreg(int key, struct tty_struct *tty)
 {
-        __trigger_all_cpu_backtrace();
+        arch_trigger_all_cpu_backtrace();
 }

 static struct sysrq_key_op sparc_globalreg_op = {
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
@@ -45,8 +45,8 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
                         void __user *, size_t *, loff_t *);
 extern int unknown_nmi_panic;

-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace

 static inline void localise_nmi_watchdog(void)
 {
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
@@ -39,7 +39,7 @@
 int unknown_nmi_panic;
 int nmi_watchdog_enabled;

-static cpumask_var_t backtrace_mask;
+static cpumask_t backtrace_mask __read_mostly;

 /* nmi_active:
  * >0: the lapic NMI watchdog is active, but can be disabled
@@ -138,7 +138,6 @@ int __init check_nmi_watchdog(void)
         if (!prev_nmi_count)
                 goto error;

-        alloc_cpumask_var(&backtrace_mask, GFP_KERNEL|__GFP_ZERO);
         printk(KERN_INFO "Testing NMI watchdog ... ");

 #ifdef CONFIG_SMP
@@ -415,14 +414,17 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
         }

         /* We can be called before check_nmi_watchdog, hence NULL check. */
-        if (backtrace_mask != NULL && cpumask_test_cpu(cpu, backtrace_mask)) {
+        if (cpumask_test_cpu(cpu, &backtrace_mask)) {
                 static DEFINE_SPINLOCK(lock);   /* Serialise the printks */

                 spin_lock(&lock);
                 printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
+                show_regs(regs);
                 dump_stack();
                 spin_unlock(&lock);
-                cpumask_clear_cpu(cpu, backtrace_mask);
+                cpumask_clear_cpu(cpu, &backtrace_mask);
+
+                rc = 1;
         }

         /* Could check oops_in_progress here too, but it's safer not to */
@@ -552,14 +554,18 @@ int do_nmi_callback(struct pt_regs *regs, int cpu)
         return 0;
 }

-void __trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(void)
 {
         int i;

-        cpumask_copy(backtrace_mask, cpu_online_mask);
+        cpumask_copy(&backtrace_mask, cpu_online_mask);
+
+        printk(KERN_INFO "sending NMI to all CPUs:\n");
+        apic->send_IPI_all(NMI_VECTOR);
+
         /* Wait for up to 10 seconds for all CPUs to do the backtrace */
         for (i = 0; i < 10 * 1000; i++) {
-                if (cpumask_empty(backtrace_mask))
+                if (cpumask_empty(&backtrace_mask))
                         break;
                 mdelay(1);
         }
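Taken together, the x86 hunks above implement a simple handshake: the trigger fills backtrace_mask with all online CPUs, sends an NMI IPI to everyone, and polls for up to 10 seconds; each CPU's NMI tick, finding its bit set, prints its registers and stack under a shared spinlock and clears its bit. Here is a hedged userspace analogy using POSIX threads, where atomics stand in for the cpumask and a polling loop for the NMI tick; names and structure are illustrative only, not kernel code (compile with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define NCPUS 4

static atomic_bool backtrace_mask[NCPUS];       /* cpumask stand-in */
static pthread_mutex_t print_lock = PTHREAD_MUTEX_INITIALIZER; /* serialise the printks */

/* Per-"CPU" loop: stands in for nmi_watchdog_tick() noticing its bit
 * in backtrace_mask, dumping state, then clearing the bit. */
static void *cpu_loop(void *arg)
{
        int cpu = (int)(long)arg;

        for (;;) {
                if (atomic_load(&backtrace_mask[cpu])) {
                        pthread_mutex_lock(&print_lock);
                        printf("NMI backtrace for cpu %d\n", cpu); /* show_regs()/dump_stack() stand-in */
                        pthread_mutex_unlock(&print_lock);
                        atomic_store(&backtrace_mask[cpu], false);
                }
                usleep(1000);
        }
        return NULL;
}

/* Trigger side: mark every CPU, "send the IPI" (the loops here are
 * already polling), then wait up to 10 seconds for the mask to empty. */
static void trigger_all_cpu_backtrace_sim(void)
{
        int i, cpu;

        for (cpu = 0; cpu < NCPUS; cpu++)
                atomic_store(&backtrace_mask[cpu], true);
        printf("sending NMI to all CPUs:\n");

        for (i = 0; i < 10 * 1000; i++) {       /* mdelay(1) loop, 10 s cap */
                bool empty = true;
                for (cpu = 0; cpu < NCPUS; cpu++)
                        if (atomic_load(&backtrace_mask[cpu]))
                                empty = false;
                if (empty)
                        break;
                usleep(1000);
        }
}

int main(void)
{
        pthread_t tids[NCPUS];
        long cpu;

        for (cpu = 0; cpu < NCPUS; cpu++)
                pthread_create(&tids[cpu], NULL, cpu_loop, (void *)cpu);
        trigger_all_cpu_backtrace_sim();
        return 0;       /* process exit also ends the polling threads */
}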
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
@@ -24,6 +24,7 @@
 #include <linux/sysrq.h>
 #include <linux/kbd_kern.h>
 #include <linux/proc_fs.h>
+#include <linux/nmi.h>
 #include <linux/quotaops.h>
 #include <linux/perf_counter.h>
 #include <linux/kernel.h>
@@ -222,13 +223,21 @@ static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);

 static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
 {
+        /*
+         * Fall back to the workqueue based printing if the
+         * backtrace printing did not succeed or the
+         * architecture has no support for it:
+         */
+        if (!trigger_all_cpu_backtrace()) {
                 struct pt_regs *regs = get_irq_regs();
+
                 if (regs) {
                         printk(KERN_INFO "CPU%d:\n", smp_processor_id());
                         show_regs(regs);
                 }
                 schedule_work(&sysrq_showallcpus);
+        }
 }

 static struct sysrq_key_op sysrq_showallcpus_op = {
         .handler        = sysrq_handle_showallcpus,
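The hunk above is the caller side of the new bool contract: try the synchronous cross-CPU backtrace first, and only when the architecture lacks it print the local CPU and defer the rest to a workqueue. A compressed userspace sketch of that control flow, with a thread standing in for the kernel workqueue; all names are illustrative (compile with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static bool trigger_all_cpu_backtrace(void)
{
        return false;   /* model an architecture without NMI backtrace support */
}

/* Workqueue stand-in: the deferred "show the other CPUs" path. */
static void *sysrq_showallcpus_work(void *unused)
{
        (void)unused;
        printf("deferred: dumping the remaining CPUs from process context\n");
        return NULL;
}

static void sysrq_handle_showallcpus(void)
{
        /* Fall back to deferred printing only when the synchronous
         * all-CPU backtrace is unavailable, mirroring the kernel flow. */
        if (!trigger_all_cpu_backtrace()) {
                pthread_t worker;

                printf("CPU0: local registers\n");      /* show_regs() stand-in */
                pthread_create(&worker, NULL, sysrq_showallcpus_work, NULL);
                pthread_join(worker, NULL);
        }
}

int main(void)
{
        sysrq_handle_showallcpus();
        return 0;
}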
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
@@ -28,8 +28,23 @@ static inline void acpi_nmi_disable(void) { }
 static inline void acpi_nmi_enable(void) { }
 #endif

-#ifndef trigger_all_cpu_backtrace
-#define trigger_all_cpu_backtrace() do { } while (0)
+/*
+ * Create trigger_all_cpu_backtrace() out of the arch-provided
+ * base function. Return whether such support was available,
+ * to allow calling code to fall back to some other mechanism:
+ */
+#ifdef arch_trigger_all_cpu_backtrace
+static inline bool trigger_all_cpu_backtrace(void)
+{
+        arch_trigger_all_cpu_backtrace();
+
+        return true;
+}
+#else
+static inline bool trigger_all_cpu_backtrace(void)
+{
+        return false;
+}
 #endif

 #endif
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
@@ -35,6 +35,7 @@
 #include <linux/rcupdate.h>
 #include <linux/interrupt.h>
 #include <linux/sched.h>
+#include <linux/nmi.h>
 #include <asm/atomic.h>
 #include <linux/bitops.h>
 #include <linux/module.h>
@@ -469,6 +470,8 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
         }
         printk(" (detected by %d, t=%ld jiffies)\n",
                smp_processor_id(), (long)(jiffies - rsp->gp_start));
+        trigger_all_cpu_backtrace();
+
         force_quiescent_state(rsp, 0);  /* Kick them all. */
 }

@@ -479,12 +482,14 @@ static void print_cpu_stall(struct rcu_state *rsp)

         printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n",
                         smp_processor_id(), jiffies - rsp->gp_start);
-        dump_stack();
+        trigger_all_cpu_backtrace();
+
         spin_lock_irqsave(&rnp->lock, flags);
         if ((long)(jiffies - rsp->jiffies_stall) >= 0)
                 rsp->jiffies_stall =
                         jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
         spin_unlock_irqrestore(&rnp->lock, flags);
+
         set_need_resched();  /* kick ourselves to get things going. */
 }
