Commit 47cab6a7 authored by Ingo Molnar

debug lockups: Improve lockup detection, fix generic arch fallback

As Andrew noted, my previous patch ("debug lockups: Improve lockup
detection") broke/removed SysRq-L support from architectures that do
not provide a __trigger_all_cpu_backtrace implementation.

Restore a fallback path and clean up the SysRq-L machinery a bit:

 - Rename the arch method to arch_trigger_all_cpu_backtrace()

 - Simplify the define

 - Document the method a bit - in the hope of more architectures
   adding support for it.

[ The patch touches Sparc code for the rename. ]

Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: "David S. Miller" <davem@davemloft.net>
LKML-Reference: <20090802140809.7ec4bb6b.akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent c1dc0b9c
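The include/linux/nmi.h side of this change (the simplified define and the generic fallback) is collapsed on this page. As a minimal sketch, assuming the return convention implied by the if (!trigger_all_cpu_backtrace()) caller in the drivers/char/sysrq.c hunk below, the pattern looks like this (a reconstruction, not the verbatim hunk):

/*
 * Sketch: architectures that implement the method also define the
 * arch_trigger_all_cpu_backtrace macro in their asm header (see the
 * sparc and x86 hunks below), which selects the arch-backed branch.
 */
#ifdef arch_trigger_all_cpu_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_all_cpu_backtrace();
	return true;	/* the architecture printed the backtraces */
}
#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;	/* no arch support, the caller must fall back */
}
#endif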
arch/sparc/include/asm/irq_64.h  +2 −2
@@ -89,8 +89,8 @@ static inline unsigned long get_softint(void)
 	return retval;
 }
 
-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 
 extern void *hardirq_stack[NR_CPUS];
 extern void *softirq_stack[NR_CPUS];
arch/sparc/kernel/process_64.c  +2 −2
@@ -251,7 +251,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
 	}
 }
 
-void __trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(void)
 {
 	struct thread_info *tp = current_thread_info();
 	struct pt_regs *regs = get_irq_regs();
@@ -304,7 +304,7 @@ void __trigger_all_cpu_backtrace(void)
 
 static void sysrq_handle_globreg(int key, struct tty_struct *tty)
 {
-	__trigger_all_cpu_backtrace();
+	arch_trigger_all_cpu_backtrace();
 }
 
 static struct sysrq_key_op sparc_globalreg_op = {
arch/x86/include/asm/nmi.h  +2 −2
@@ -45,8 +45,8 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
 			void __user *, size_t *, loff_t *);
 extern int unknown_nmi_panic;
 
-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 
 static inline void localise_nmi_watchdog(void)
 {
arch/x86/kernel/apic/nmi.c  +1 −1
@@ -554,7 +554,7 @@ int do_nmi_callback(struct pt_regs *regs, int cpu)
 	return 0;
 }
 
-void __trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(void)
 {
 	int i;
drivers/char/sysrq.c  +14 −1
@@ -223,7 +223,20 @@ static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
 
 static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
 {
-	trigger_all_cpu_backtrace();
+	/*
+	 * Fall back to the workqueue based printing if the
+	 * backtrace printing did not succeed or the
+	 * architecture has no support for it:
+	 */
+	if (!trigger_all_cpu_backtrace()) {
+		struct pt_regs *regs = get_irq_regs();
+
+		if (regs) {
+			printk(KERN_INFO "CPU%d:\n", smp_processor_id());
+			show_regs(regs);
+		}
+		schedule_work(&sysrq_showallcpus);
+	}
 }
 
 static struct sysrq_key_op sysrq_showallcpus_op = {
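With this scheme, a new architecture opts in by implementing the function and defining the macro to its own name, exactly as the sparc and x86 asm headers above do. A hypothetical header fragment for an architecture adding support (illustrative only, not part of this commit):

/* Hypothetical asm/nmi.h fragment, mirroring the sparc and x86
 * hunks above; defining the macro to its own name is what lets
 * the generic header test for support with a plain #ifdef. */
void arch_trigger_all_cpu_backtrace(void);
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace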