
Commit ac8bf564 authored by Linus Torvalds
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6:
  sparc64: Fix hardirq tracing in trap return path.
  sparc64: Use correct pt_regs in decode_access_size() error paths.
  sparc64: Fix PREEMPT_ACTIVE value.
  sparc64: Run NMIs on the hardirq stack.
  sparc64: Allocate sufficient stack space in ftrace stubs.
  sparc: Fix forgotten kmemleak headers inclusion
parents 34388d1c 28a1f533
arch/sparc/include/asm/thread_info_64.h  +1 −1
@@ -111,7 +111,7 @@ struct thread_info {
#define THREAD_SHIFT PAGE_SHIFT
#endif /* PAGE_SHIFT == 13 */

-#define PREEMPT_ACTIVE		0x4000000
+#define PREEMPT_ACTIVE		0x10000000

/*
 * macros/functions for gaining access to the thread information structure
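
Why the value matters (annotation, not part of the commit): PREEMPT_ACTIVE shares the preempt_count word with the preempt, softirq, hardirq and NMI nesting counts, and bit 26 (0x4000000) falls inside the region the generic <linux/hardirq.h> layout of this era reserves for hardirq/NMI counting, so the old sparc64 value could alias a nesting count, while bit 28 (0x10000000) sits above all of them. A minimal sketch, assuming the generic bit widths (PREEMPT_BITS=8, SOFTIRQ_BITS=8, HARDIRQ_BITS=10, NMI_BITS=1 -- these widths are an assumption from the generic header, not taken from this diff):

#include <stdio.h>

/* Recompute the assumed generic preempt_count layout. */
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + 8)    /* bits  8..15 */
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + 8)    /* bits 16..25 */
#define NMI_SHIFT     (HARDIRQ_SHIFT + 10)   /* bit  26     */

#define HARDIRQ_MASK  (((1UL << 10) - 1) << HARDIRQ_SHIFT)
#define NMI_OFFSET    (1UL << NMI_SHIFT)

int main(void)
{
	unsigned long counted = HARDIRQ_MASK | NMI_OFFSET;

	/* Old value: bit 26 collides with the counting bits. */
	printf("0x4000000 collides: %d\n", (0x4000000UL & counted) != 0);
	/* New value: bit 28 is clear of every counting field. */
	printf("0x10000000 collides: %d\n", (0x10000000UL & counted) != 0);
	return 0;
}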
arch/sparc/kernel/irq_64.c  +2 −18
@@ -22,6 +22,7 @@
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
+#include <linux/kmemleak.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
@@ -46,6 +47,7 @@

#include "entry.h"
#include "cpumap.h"
#include "kstack.h"

#define NUM_IVECS	(IMAP_INR + 1)

@@ -712,24 +714,6 @@ void ack_bad_irq(unsigned int virt_irq)
void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

-static __attribute__((always_inline)) void *set_hardirq_stack(void)
-{
-	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
-
-	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
-	if (orig_sp < sp ||
-	    orig_sp > (sp + THREAD_SIZE)) {
-		sp += THREAD_SIZE - 192 - STACK_BIAS;
-		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
-	}
-
-	return orig_sp;
-}
-
-static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
-{
-	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
-}

void __irq_entry handler_irq(int irq, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
arch/sparc/kernel/kstack.h  +19 −0
@@ -61,4 +61,23 @@ static inline bool kstack_is_trap_frame(struct thread_info *tp, struct pt_regs *

}

+static inline __attribute__((always_inline)) void *set_hardirq_stack(void)
+{
+	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
+
+	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
+	if (orig_sp < sp ||
+	    orig_sp > (sp + THREAD_SIZE)) {
+		sp += THREAD_SIZE - 192 - STACK_BIAS;
+		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
+	}
+
+	return orig_sp;
+}
+
+static inline __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
+{
+	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
+}
+
#endif /* _KSTACK_H */
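
A note on what moved here (annotation, not part of the commit): these helpers were lifted out of irq_64.c above so the NMI path can share them. The subtle part is the range check: an NMI can arrive while the CPU is already executing on the hardirq stack, and unconditionally resetting %sp to the top of that stack would clobber the interrupted handler's live frames, so the switch is skipped when the current stack pointer is already inside [sp, sp + THREAD_SIZE]. (The THREAD_SIZE - 192 - STACK_BIAS adjustment appears to leave room for a sparc64 register save area and apply the usual 64-bit stack bias.) A minimal user-space sketch of just the containment test, with illustrative names and sizes:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE (16 * 1024)   /* illustrative stack size */

/* Same test as set_hardirq_stack(): switch only if the current stack
 * pointer is not already inside the dedicated IRQ stack. */
static bool needs_switch(uintptr_t cur_sp, uintptr_t base)
{
	return cur_sp < base || cur_sp > base + THREAD_SIZE;
}

int main(void)
{
	static char irq_stack[THREAD_SIZE];
	uintptr_t base = (uintptr_t)irq_stack;

	/* Entry from some other stack: must switch. */
	printf("%d\n", needs_switch(base + 2 * THREAD_SIZE, base));
	/* Nested entry (NMI during an IRQ): already on the stack, so a
	 * reset to the top would wipe the outer handler's frames. */
	printf("%d\n", needs_switch(base + 512, base));
	return 0;
}

The nmi.c hunk below shows the intended call pattern: save the value set_hardirq_stack() returns and hand it back to restore_hardirq_stack() once the handler body finishes.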
arch/sparc/kernel/nmi.c  +7 −0
@@ -23,6 +23,8 @@
#include <asm/ptrace.h>
#include <asm/pcr.h>

#include "kstack.h"

/* We don't have a real NMI on sparc64, but we can fake one
 * up using profiling counter overflow interrupts and interrupt
 * levels.
@@ -92,6 +94,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
{
	unsigned int sum, touched = 0;
+	void *orig_sp;

	clear_softint(1 << irq);

@@ -99,6 +102,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)

	nmi_enter();

+	orig_sp = set_hardirq_stack();
+
	if (notify_die(DIE_NMI, "nmi", regs, 0,
		       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
		touched = 1;
@@ -124,6 +129,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
		pcr_ops->write(pcr_enable);
	}

+	restore_hardirq_stack(orig_sp);
+
	nmi_exit();
}

arch/sparc/kernel/rtrap_64.S  +11 −1
@@ -130,7 +130,17 @@ rtrap_xcall:
		 nop
		call			trace_hardirqs_on
		 nop
-		wrpr			%l4, %pil
+		/* Do not actually set the %pil here.  We will do that
+		 * below after we clear PSTATE_IE in the %pstate register.
+		 * If we re-enable interrupts here, we can recurse down
+		 * the hardirq stack potentially endlessly, causing a
+		 * stack overflow.
+		 *
+		 * It is tempting to put this test and trace_hardirqs_on
+		 * call at the 'rt_continue' label, but that will not work
+		 * as that path hits unconditionally and we do not want to
+		 * execute this in NMI return paths, for example.
+		 */
#endif
rtrap_no_irq_enable:
		andcc			%l1, TSTATE_PRIV, %l3
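
The comment added above carries the actual reasoning of the fix: trace_hardirqs_on() may run here, but the %pil write that re-enables interrupt delivery has to wait until PSTATE_IE is cleared, otherwise a pending interrupt can be taken at this exact spot, re-enter the same return path on the same hardirq stack, and repeat until the stack overflows. A hedged C-style rendering of the two orderings (every name here is an illustrative stub, not a kernel API):

#include <stdio.h>

/* Model just the two pieces of state the comment talks about:
 * PSTATE.IE (global interrupt enable) and %pil (priority mask). */
static int pstate_ie;
static int pil;

static void wrpr_pil(int new_pil)
{
	pil = new_pil;
	/* Delivery is possible only with IE set and %pil lowered; in
	 * the broken ordering this reopens the trap-return path. */
	if (pstate_ie && pil == 0)
		printf("  window open: interrupt may recurse here\n");
	else
		printf("  no window: delivery still blocked\n");
}

int main(void)
{
	printf("old order (lower %%pil while IE is still set):\n");
	pstate_ie = 1;          /* interrupts globally enabled */
	wrpr_pil(0);            /* where the removed wrpr instruction sat */

	printf("fixed order (clear IE first, lower %%pil later):\n");
	pstate_ie = 0;          /* PSTATE_IE cleared, as the comment says */
	wrpr_pil(0);            /* now safe: nothing can be delivered */
	return 0;
}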