Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a8c1be9d authored by Alexander van Heukelum, committed by Ingo Molnar
Browse files

x86: initial changes to unify traps_32.c and traps_64.c



This patch does not change the generated object files.

Signed-off-by: Alexander van Heukelum <heukelum@fastmail.fm>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent e93ef949
Loading
Loading
Loading
Loading
+39 −39
Original line number Diff line number Diff line
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
@@ -130,7 +131,8 @@ void printk_address(unsigned long address, int reliable)
#endif
}

static inline int valid_stack_ptr(struct thread_info *tinfo, void *p, unsigned size)
/*
 * Return nonzero if an object of @size bytes at @p lies entirely within
 * the THREAD_SIZE-byte stack region whose base is @tinfo: the pointer
 * must be strictly above the thread_info itself and the object must not
 * run past the end of the region.
 */
static inline int valid_stack_ptr(struct thread_info *tinfo,
			void *p, unsigned int size)
{
	return	p > (void *)tinfo &&
		p <= (void *)tinfo + THREAD_SIZE - size;
}
@@ -167,8 +169,6 @@ print_context_stack(struct thread_info *tinfo,
	return bp;
}

#define MSG(msg)		ops->warning(data, msg)

void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
@@ -178,7 +178,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,

	if (!stack) {
		unsigned long dummy;

		stack = &dummy;
		if (task != current)
			stack = (unsigned long *)task->thread.sp;
@@ -196,7 +195,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
	}
#endif

	while (1) {
	for (;;) {
		struct thread_info *context;

		context = (struct thread_info *)
@@ -351,8 +350,7 @@ void show_registers(struct pt_regs *regs)
		printk(KERN_EMERG "Code: ");

		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET ||
			probe_kernel_address(ip, c)) {
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at EIP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
@@ -818,6 +816,8 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;

	/* AK: following checks seem to be broken on modern chipsets. FIXME */
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
@@ -997,7 +997,7 @@ void math_error(void __user *ip)
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't syncronizing its FPU usage
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception
	 */
@@ -1201,8 +1201,8 @@ void __init trap_init(void)
	set_trap_gate(0, &divide_error);
	set_intr_gate(1, &debug);
	set_intr_gate(2, &nmi);
	set_system_intr_gate(3, &int3); /* int3/4 can be called from all */
	set_system_gate(4, &overflow);
	set_system_intr_gate(3, &int3); /* int3 can be called from all */
	set_system_gate(4, &overflow); /* int4 can be called from all */
	set_trap_gate(5, &bounds);
	set_trap_gate(6, &invalid_op);
	set_trap_gate(7, &device_not_available);
+152 −157
Original line number Diff line number Diff line
@@ -205,8 +205,6 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
	return NULL;
}

#define MSG(txt) ops->warning(data, txt)

/*
 * x86-64 can have up to three kernel stacks: 
 * process stack
@@ -233,8 +231,8 @@ struct stack_frame {
	unsigned long return_address;
};


static inline unsigned long print_context_stack(struct thread_info *tinfo,
static inline unsigned long
print_context_stack(struct thread_info *tinfo,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data,
		unsigned long *end)
@@ -259,7 +257,7 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
	return bp;
}

void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
@@ -268,31 +266,29 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
	unsigned used = 0;
	struct thread_info *tinfo;

	if (!tsk)
		tsk = current;
	tinfo = task_thread_info(tsk);
	if (!task)
		task = current;
	tinfo = task_thread_info(task);

	if (!stack) {
		unsigned long dummy;
		stack = &dummy;
		if (tsk && tsk != current)
			stack = (unsigned long *)tsk->thread.sp;
		if (task && task != current)
			stack = (unsigned long *)task->thread.sp;
	}

#ifdef CONFIG_FRAME_POINTER
	if (!bp) {
		if (tsk == current) {
		if (task == current) {
			/* Grab bp right from our regs */
			asm("movq %%rbp, %0" : "=r" (bp) :);
		} else {
			/* bp is the last reg pushed by switch_to */
			bp = *(unsigned long *) tsk->thread.sp;
			bp = *(unsigned long *) task->thread.sp;
		}
	}
#endif



	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
@@ -382,18 +378,17 @@ static const struct stacktrace_ops print_trace_ops = {
	.address = print_trace_address,
};

void
show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack,
		unsigned long bp)
void show_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp)
{
	printk("\nCall Trace:\n");
	dump_trace(tsk, regs, stack, bp, &print_trace_ops, NULL);
	dump_trace(task, regs, stack, bp, &print_trace_ops, NULL);
	printk("\n");
}

static void
_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp,
							unsigned long bp)
_show_stack(struct task_struct *task, struct pt_regs *regs,
		unsigned long *sp, unsigned long bp)
{
	unsigned long *stack;
	int i;
@@ -405,8 +400,8 @@ _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp,
	// back trace for this cpu.

	if (sp == NULL) {
		if (tsk)
			sp = (unsigned long *)tsk->thread.sp;
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}
@@ -427,12 +422,12 @@ _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp,
		printk(" %016lx", *stack++);
		touch_nmi_watchdog();
	}
	show_trace(tsk, regs, sp, bp);
	show_trace(task, regs, sp, bp);
}

void show_stack(struct task_struct *tsk, unsigned long * sp)
void show_stack(struct task_struct *task, unsigned long *sp)
{
	_show_stack(tsk, NULL, sp, 0);
	_show_stack(task, NULL, sp, 0);
}

/*
@@ -440,7 +435,7 @@ void show_stack(struct task_struct *tsk, unsigned long * sp)
 */
void dump_stack(void)
{
	unsigned long dummy;
	unsigned long stack;
	unsigned long bp = 0;

#ifdef CONFIG_FRAME_POINTER
@@ -453,7 +448,7 @@ void dump_stack(void)
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	show_trace(NULL, NULL, &dummy, bp);
	show_trace(NULL, NULL, &stack, bp);
}

EXPORT_SYMBOL(dump_stack);
@@ -576,8 +571,10 @@ int __kprobes __die(const char * str, struct pt_regs * regs, long err)
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	if (notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
	if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
		return 1;

	show_registers(regs);
	add_taint(TAINT_DIE);
	/* Executive summary in case the oops scrolled away */
@@ -606,8 +603,7 @@ die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
	unsigned long flags;

	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) ==
	    NOTIFY_STOP)
	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
		return;

	flags = oops_begin();
@@ -629,9 +625,9 @@ die_nmi(char *str, struct pt_regs *regs, int do_panic)
	do_exit(SIGBUS);
}

static void __kprobes do_trap(int trapnr, int signr, char *str,
			      struct pt_regs * regs, long error_code,
			      siginfo_t *info)
static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

@@ -859,7 +855,6 @@ asmlinkage notrace __kprobes void default_do_nmi(struct pt_regs *regs)
		return;

	/* AK: following checks seem to be broken on modern chipsets. FIXME */

	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
@@ -870,9 +865,12 @@ asmlinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
	nmi_enter();

	add_pda(__nmi_count, 1);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();
}

@@ -893,9 +891,10 @@ asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
{
	trace_hardirqs_fixup();

	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
	}

	preempt_conditional_sti(regs);
	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
@@ -948,14 +947,12 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg7) { 
		if (!tsk->thread.debugreg7)
			goto clear_dr7;
	}
	}

	tsk->thread.debugreg6 = condition;


	/*
	 * Single-stepping through TF: make sure we ignore any events in
	 * kernel space (but re-enable TF when returning to user mode).
@@ -975,7 +972,7 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
	force_sig_info(SIGTRAP, &info, tsk);

clear_dr7:
	set_debugreg(0UL, 7);
	set_debugreg(0, 7);
	preempt_conditional_cli(regs);
	return;

@@ -983,6 +980,7 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->flags &= ~X86_EFLAGS_TF;
	preempt_conditional_cli(regs);
	return;
}

static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
@@ -1038,8 +1036,8 @@ asmlinkage void do_coprocessor_error(struct pt_regs *regs)
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (swd & ~cwd & 0x3f) {
		case 0x000:
		default:
	case 0x000: /* No unmasked exception */
	default: /* Multiple exceptions */
		break;
	case 0x001: /* Invalid Op */
		/*
@@ -1198,18 +1196,16 @@ void __init trap_init(void)
#ifdef CONFIG_IA32_EMULATION
	set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif
       
	/*
	 * initialize the per thread extended state:
	 */
        init_thread_xstate();
	/*
	 * Should be a barrier for any external CPU state.
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();
}


static int __init oops_setup(char *s)
{
	if (!s)
@@ -1229,7 +1225,6 @@ static int __init kstack_setup(char *s)
}
early_param("kstack", kstack_setup);


static int __init code_bytes_setup(char *s)
{
	code_bytes = simple_strtoul(s, NULL, 0);