
Commit 99f7b025 authored by Linus Torvalds

Merge branch 'x86-threadinfo-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 threadinfo changes from Ingo Molnar:
 "The main change here is the consolidation/unification of 32 and 64 bit
  thread_info handling methods, from Steve Rostedt"

* 'x86-threadinfo-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, threadinfo: Redo "x86: Use inline assembler to get sp"
  x86: Clean up dumpstack_64.c code
  x86: Keep thread_info on thread stack in x86_32
  x86: Prepare removal of previous_esp from i386 thread_info structure
  x86: Nuke GET_THREAD_INFO_WITH_ESP() macro for i386
  x86: Nuke the supervisor_stack field in i386 thread_info
parents a21e4087 6cce16f9
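
For context on what the series below unifies: the old i386 code located thread_info by masking the stack pointer, while x86_64 computed it from the per-cpu kernel_stack variable; after this series both use the per-cpu scheme, and the 32-bit stack dumper stops chaining through thread_info->previous_esp, reading the saved pointer from the bottom of each IRQ stack instead. A minimal userspace sketch of the old mask trick (THREAD_SIZE and struct thread_info here are toy stand-ins, not the kernel's definitions):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define THREAD_SIZE 8192		/* assume 8K thread stacks */

	struct thread_info { int cpu; };	/* toy stand-in */

	int main(void)
	{
		/* A THREAD_SIZE-aligned block plays the role of a task's
		 * stack; thread_info sits at its lowest address. */
		void *stk = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
		((struct thread_info *)stk)->cpu = 3;

		/* "sp" is any address inside the stack, e.g. near the top. */
		uintptr_t sp = (uintptr_t)stk + THREAD_SIZE - 64;

		/* The old 32-bit lookup: mask off the low bits of %esp. */
		struct thread_info *ti =
			(struct thread_info *)(sp & ~(uintptr_t)(THREAD_SIZE - 1));

		printf("cpu = %d\n", ti->cpu);	/* prints 3 */
		free(stk);
		return 0;
	}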
arch/x86/include/asm/processor.h  +9 −0
@@ -449,6 +449,15 @@ struct stack_canary {
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
+/*
+ * per-CPU IRQ handling stacks
+ */
+struct irq_stack {
+	u32                     stack[THREAD_SIZE/sizeof(u32)];
+} __aligned(THREAD_SIZE);
+
+DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
+DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
#endif	/* X86_64 */

extern unsigned int xstate_size;
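
These two per-cpu pointers are filled in by the 32-bit IRQ entry code (arch/x86/kernel/irq_32.c, which this excerpt does not show). A rough sketch, under that assumption, of the hand-off convention the dumpstack changes below rely on:

	/* Sketch only, modeled on irq_32.c (not shown here): before
	 * switching to the per-cpu IRQ stack, the old stack pointer is
	 * saved in the first u32 of that stack, which is exactly where
	 * dump_trace() later looks for it. */
	struct irq_stack *irqstk = __this_cpu_read(hardirq_stack);
	u32 *prev_esp = (u32 *)irqstk;		/* bottom of the IRQ stack */
	*prev_esp = current_stack_pointer;	/* back-link to the old stack */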
arch/x86/include/asm/thread_info.h  +5 −48
@@ -9,6 +9,7 @@

#include <linux/compiler.h>
#include <asm/page.h>
+#include <asm/percpu.h>
#include <asm/types.h>

/*
@@ -32,12 +33,6 @@ struct thread_info {
	mm_segment_t		addr_limit;
	struct restart_block    restart_block;
	void __user		*sysenter_return;
-#ifdef CONFIG_X86_32
-	unsigned long           previous_esp;   /* ESP of the previous stack in
-						   case of nested (IRQ) stacks
-						*/
-	__u8			supervisor_stack[0];
-#endif
	unsigned int		sig_on_uaccess_error:1;
	unsigned int		uaccess_err:1;	/* uaccess failed */
};
@@ -153,9 +148,9 @@ struct thread_info {
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)

-#ifdef CONFIG_X86_32
-
#define STACK_WARN		(THREAD_SIZE/8)
+#define KERNEL_STACK_OFFSET	(5*(BITS_PER_LONG/8))
+
/*
 * macros/functions for gaining access to the thread information structure
 *
@@ -163,42 +158,6 @@ struct thread_info {
 */
#ifndef __ASSEMBLY__

-#define current_stack_pointer ({		\
-	unsigned long sp;			\
-	asm("mov %%esp,%0" : "=g" (sp));	\
-	sp;					\
-})
-
-/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
-	return (struct thread_info *)
-		(current_stack_pointer & ~(THREAD_SIZE - 1));
-}
-
-#else /* !__ASSEMBLY__ */
-
-/* how to get the thread information struct from ASM */
-#define GET_THREAD_INFO(reg)	 \
-	movl $-THREAD_SIZE, reg; \
-	andl %esp, reg
-
-/* use this one if reg already contains %esp */
-#define GET_THREAD_INFO_WITH_ESP(reg) \
-	andl $-THREAD_SIZE, reg
-
-#endif
-
-#else /* X86_32 */
-
-#include <asm/percpu.h>
-#define KERNEL_STACK_OFFSET (5*8)
-
-/*
- * macros/functions for gaining access to the thread information structure
- * preempt_count needs to be 1 initially, until the scheduler is functional.
- */
-#ifndef __ASSEMBLY__
DECLARE_PER_CPU(unsigned long, kernel_stack);

static inline struct thread_info *current_thread_info(void)
@@ -213,8 +172,8 @@ static inline struct thread_info *current_thread_info(void)

/* how to get the thread information struct from ASM */
#define GET_THREAD_INFO(reg) \
-	movq PER_CPU_VAR(kernel_stack),reg ; \
-	subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
+	_ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
+	_ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;

/*
 * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
@@ -224,8 +183,6 @@ static inline struct thread_info *current_thread_info(void)

#endif

-#endif /* !X86_32 */
-
/*
 * Thread-synchronous status.
 *
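
The rewritten GET_THREAD_INFO can serve both word sizes because _ASM_MOV and _ASM_SUB (from <asm/asm.h>) expand to the correctly suffixed instruction, roughly:

	/*
	 * Illustrative expansion of the unified macro:
	 *   32-bit:  movl PER_CPU_VAR(kernel_stack), reg
	 *            subl $(THREAD_SIZE-KERNEL_STACK_OFFSET), reg
	 *   64-bit:  movq PER_CPU_VAR(kernel_stack), reg
	 *            subq $(THREAD_SIZE-KERNEL_STACK_OFFSET), reg
	 */

The C-side counterpart is the now-shared current_thread_info(); its body falls between the hunks shown above, but in the merged tree it computes the same address, along these lines (reconstructed for reference, not part of this excerpt):

	static inline struct thread_info *current_thread_info(void)
	{
		struct thread_info *ti;
		ti = (void *)(this_cpu_read_stable(kernel_stack) +
			      KERNEL_STACK_OFFSET - THREAD_SIZE);
		return ti;
	}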
arch/x86/kernel/cpu/common.c  +4 −4
@@ -1079,6 +1079,10 @@ static __init int setup_disablecpuid(char *arg)
}
__setup("clearcpuid=", setup_disablecpuid);

+DEFINE_PER_CPU(unsigned long, kernel_stack) =
+	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+EXPORT_PER_CPU_SYMBOL(kernel_stack);
+
#ifdef CONFIG_X86_64
struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
@@ -1095,10 +1099,6 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
	&init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

-DEFINE_PER_CPU(unsigned long, kernel_stack) =
-	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
-EXPORT_PER_CPU_SYMBOL(kernel_stack);
-
DEFINE_PER_CPU(char *, irq_stack_ptr) =
	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;

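The moved initializer is easier to check with numbers plugged in; a worked example assuming a 32-bit build with 8K stacks:

	/*
	 * THREAD_SIZE         = 8192
	 * KERNEL_STACK_OFFSET = 5 * (BITS_PER_LONG/8) = 5 * 4 = 20
	 * kernel_stack        = &init_thread_union + 8192 - 20
	 *
	 * current_thread_info() inverts it:
	 *   kernel_stack + KERNEL_STACK_OFFSET - THREAD_SIZE
	 *     = &init_thread_union + 8172 + 20 - 8192
	 *     = &init_thread_union
	 */
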
arch/x86/kernel/dumpstack_32.c  +40 −4
@@ -16,12 +16,35 @@

#include <asm/stacktrace.h>

+static void *is_irq_stack(void *p, void *irq)
+{
+	if (p < irq || p >= (irq + THREAD_SIZE))
+		return NULL;
+	return irq + THREAD_SIZE;
+}
+
+
+static void *is_hardirq_stack(unsigned long *stack, int cpu)
+{
+	void *irq = per_cpu(hardirq_stack, cpu);
+
+	return is_irq_stack(stack, irq);
+}
+
+static void *is_softirq_stack(unsigned long *stack, int cpu)
+{
+	void *irq = per_cpu(softirq_stack, cpu);
+
+	return is_irq_stack(stack, irq);
+}
+
void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu();
	int graph = 0;
+	u32 *prev_esp;

	if (!task)
		task = current;
@@ -39,18 +62,31 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,

	for (;;) {
		struct thread_info *context;
+		void *end_stack;
+
+		end_stack = is_hardirq_stack(stack, cpu);
+		if (!end_stack)
+			end_stack = is_softirq_stack(stack, cpu);

-		context = (struct thread_info *)
-			((unsigned long)stack & (~(THREAD_SIZE - 1)));
-		bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
+		context = task_thread_info(task);
+		bp = ops->walk_stack(context, stack, bp, ops, data,
+				     end_stack, &graph);

-		stack = (unsigned long *)context->previous_esp;
+		/* Stop if not on irq stack */
+		if (!end_stack)
+			break;
+
+		/* The previous esp is saved on the bottom of the stack */
+		prev_esp = (u32 *)(end_stack - THREAD_SIZE);
+		stack = (unsigned long *)*prev_esp;
		if (!stack)
			break;
+
		if (ops->stack(data, "IRQ") < 0)
			break;
		touch_nmi_watchdog();
	}
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);
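
The loop above now hops from an IRQ stack back to the interrupted stack through a pointer stored at the very bottom of the IRQ stack, instead of reading previous_esp out of thread_info. A small runnable userspace mock of that back-link (names and sizes are illustrative, not the kernel's):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define THREAD_SIZE 4096

	int main(void)
	{
		void *task_stk = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
		void *irq_stk  = aligned_alloc(THREAD_SIZE, THREAD_SIZE);

		/* "IRQ entry": save the interrupted stack pointer in the
		 * first word of the IRQ stack (a u32 on real x86_32). */
		uintptr_t task_sp = (uintptr_t)task_stk + 512;
		*(uintptr_t *)irq_stk = task_sp;

		/* "dump_trace": end_stack is one past the IRQ stack... */
		void *end_stack = (char *)irq_stk + THREAD_SIZE;

		/* ...so the saved pointer sits at end_stack - THREAD_SIZE. */
		uintptr_t *prev_sp = (uintptr_t *)((char *)end_stack - THREAD_SIZE);
		printf("unwind continues at %p\n", (void *)*prev_sp);

		free(irq_stk);
		free(task_stk);
		return 0;
	}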

arch/x86/kernel/dumpstack_64.c  +83 −34
@@ -104,6 +104,45 @@ in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
	return (stack >= irq_stack && stack < irq_stack_end);
}

+static const unsigned long irq_stack_size =
+	(IRQ_STACK_SIZE - 64) / sizeof(unsigned long);
+
+enum stack_type {
+	STACK_IS_UNKNOWN,
+	STACK_IS_NORMAL,
+	STACK_IS_EXCEPTION,
+	STACK_IS_IRQ,
+};
+
+static enum stack_type
+analyze_stack(int cpu, struct task_struct *task,
+	      unsigned long *stack, unsigned long **stack_end, char **id)
+{
+	unsigned long *irq_stack;
+	unsigned long addr;
+	unsigned used = 0;
+
+	addr = ((unsigned long)stack & (~(THREAD_SIZE - 1)));
+	if ((unsigned long)task_stack_page(task) == addr)
+		return STACK_IS_NORMAL;
+
+	*stack_end = in_exception_stack(cpu, (unsigned long)stack,
+					&used, id);
+	if (*stack_end)
+		return STACK_IS_EXCEPTION;
+
+	*stack_end = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
+	if (!*stack_end)
+		return STACK_IS_UNKNOWN;
+
+	irq_stack = *stack_end - irq_stack_size;
+
+	if (in_irq_stack(stack, irq_stack, *stack_end))
+		return STACK_IS_IRQ;
+
+	return STACK_IS_UNKNOWN;
+}
+
/*
 * x86-64 can have up to three kernel stacks:
 * process stack
@@ -116,12 +155,11 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
		const struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu();
-	unsigned long *irq_stack_end =
-		(unsigned long *)per_cpu(irq_stack_ptr, cpu);
-	unsigned used = 0;
	struct thread_info *tinfo;
-	int graph = 0;
+	unsigned long *irq_stack;
	unsigned long dummy;
+	int graph = 0;
+	int done = 0;

	if (!task)
		task = current;
@@ -143,50 +181,61 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
	 * exceptions
	 */
	tinfo = task_thread_info(task);
-	for (;;) {
+	while (!done) {
+		unsigned long *stack_end;
+		enum stack_type stype;
		char *id;
-		unsigned long *estack_end;
-		estack_end = in_exception_stack(cpu, (unsigned long)stack,
-						&used, &id);

-		if (estack_end) {
+		stype = analyze_stack(cpu, task, stack, &stack_end, &id);
+
+		/* Default finish unless specified to continue */
+		done = 1;
+
+		switch (stype) {
+
+		/* Break out early if we are on the thread stack */
+		case STACK_IS_NORMAL:
+			break;
+
+		case STACK_IS_EXCEPTION:
+
			if (ops->stack(data, id) < 0)
				break;

			bp = ops->walk_stack(tinfo, stack, bp, ops,
-					     data, estack_end, &graph);
+					     data, stack_end, &graph);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
-			stack = (unsigned long *) estack_end[-2];
-			continue;
-		}
-		if (irq_stack_end) {
-			unsigned long *irq_stack;
-			irq_stack = irq_stack_end -
-				(IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);
+			stack = (unsigned long *) stack_end[-2];
+			done = 0;
+			break;

-			if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
+		case STACK_IS_IRQ:
+
			if (ops->stack(data, "IRQ") < 0)
				break;
			bp = ops->walk_stack(tinfo, stack, bp,
-					ops, data, irq_stack_end, &graph);
+				     ops, data, stack_end, &graph);
			/*
			 * We link to the next stack (which would be
			 * the process stack normally) the last
			 * pointer (index -1 to end) in the IRQ stack:
			 */
-			stack = (unsigned long *) (irq_stack_end[-1]);
-			irq_stack_end = NULL;
+			stack = (unsigned long *) (stack_end[-1]);
+			irq_stack = stack_end - irq_stack_size;
			ops->stack(data, "EOI");
-			continue;
-			}
-		}
+			done = 0;
+			break;
+
+		case STACK_IS_UNKNOWN:
+			ops->stack(data, "UNK");
+			break;
+		}
	}

	/*
	 * This handles the process stack:
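
Taken together, the walk is now a classify-then-hop loop. A summary of the linkage it follows, taken from the offsets in the comments above:

	/*
	 * analyze_stack() result     next stack pointer comes from
	 * ------------------------   ----------------------------------
	 * STACK_IS_EXCEPTION         stack_end[-2], annotated "<EOE>"
	 * STACK_IS_IRQ               stack_end[-1], annotated "EOI"
	 * STACK_IS_NORMAL            none - stop and fall through to
	 *                            the final process-stack walk below
	 * STACK_IS_UNKNOWN           none - annotate "UNK" and stop
	 */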