Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5645688f authored by Linus Torvalds
Browse files

Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 asm updates from Ingo Molnar:
 "The main changes in this development cycle were:

   - a large number of call stack dumping/printing improvements: higher
     robustness, better cross-context dumping, improved output, etc.
     (Josh Poimboeuf)

   - vDSO getcpu() performance improvement for future Intel CPUs with
     the RDPID instruction (Andy Lutomirski)

   - add two new Intel AVX512 features and the CPUID support
     infrastructure for it: AVX512IFMA and AVX512VBMI. (Gayatri Kammela,
     He Chen)

   - more copy-user unification (Borislav Petkov)

   - entry code assembly macro simplifications (Alexander Kuleshov)

   - vDSO C/R support improvements (Dmitry Safonov)

   - misc fixes and cleanups (Borislav Petkov, Paul Bolle)"

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (40 commits)
  scripts/decode_stacktrace.sh: Fix address line detection on x86
  x86/boot/64: Use defines for page size
  x86/dumpstack: Make stack name tags more comprehensible
  selftests/x86: Add test_vdso to test getcpu()
  x86/vdso: Use RDPID in preference to LSL when available
  x86/dumpstack: Handle NULL stack pointer in show_trace_log_lvl()
  x86/cpufeatures: Enable new AVX512 cpu features
  x86/cpuid: Provide get_scattered_cpuid_leaf()
  x86/cpuid: Cleanup cpuid_regs definitions
  x86/copy_user: Unify the code by removing the 64-bit asm _copy_*_user() variants
  x86/unwind: Ensure stack grows down
  x86/vdso: Set vDSO pointer only after success
  x86/prctl/uapi: Remove #ifdef for CHECKPOINT_RESTORE
  x86/unwind: Detect bad stack return address
  x86/dumpstack: Warn on stack recursion
  x86/unwind: Warn on bad frame pointer
  x86/decoder: Use stderr if insn sanity test fails
  x86/decoder: Use stdout if insn decoder test is successful
  mm/page_alloc: Remove kernel address exposure in free_reserved_area()
  x86/dumpstack: Remove raw stack dump
  ...
parents 4ade5b22 53938ee4
Loading
Loading
Loading
Loading
+0 −3
Original line number Original line Diff line number Diff line
@@ -1963,9 +1963,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			kmemcheck=2 (one-shot mode)
			kmemcheck=2 (one-shot mode)
			Default: 2 (one-shot mode)
			Default: 2 (one-shot mode)


	kstack=N	[X86] Print N words from the kernel stack
			in oops dumps.

	kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
	kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
			Default is 0 (don't ignore, but inject #GP)
			Default is 0 (don't ignore, but inject #GP)


+0 −8
Original line number Original line Diff line number Diff line
@@ -40,7 +40,6 @@ show up in /proc/sys/kernel:
- hung_task_warnings
- hung_task_warnings
- kexec_load_disabled
- kexec_load_disabled
- kptr_restrict
- kptr_restrict
- kstack_depth_to_print       [ X86 only ]
- l2cr                        [ PPC only ]
- l2cr                        [ PPC only ]
- modprobe                    ==> Documentation/debugging-modules.txt
- modprobe                    ==> Documentation/debugging-modules.txt
- modules_disabled
- modules_disabled
@@ -395,13 +394,6 @@ When kptr_restrict is set to (2), kernel pointers printed using


==============================================================
==============================================================


kstack_depth_to_print: (X86 only)

Controls the number of words to print when dumping the raw
kernel stack.

==============================================================

l2cr: (PPC only)
l2cr: (PPC only)


This flag controls the L2 cache of G3 processor boards. If
This flag controls the L2 cache of G3 processor boards. If
+0 −4
Original line number Original line Diff line number Diff line
@@ -277,10 +277,6 @@ IOMMU (input/output memory management unit)
    space might stop working. Use this option if you have devices that
    space might stop working. Use this option if you have devices that
    are accessed from userspace directly on some PCI host bridge.
    are accessed from userspace directly on some PCI host bridge.


Debugging

  kstack=N	Print N words from the kernel stack in oops dumps.

Miscellaneous
Miscellaneous


	nogbpages
	nogbpages
+22 −11
Original line number Original line Diff line number Diff line
@@ -90,8 +90,8 @@ For 32-bit we have the following conventions - kernel is built with


#define SIZEOF_PTREGS	21*8
#define SIZEOF_PTREGS	21*8


	.macro ALLOC_PT_GPREGS_ON_STACK addskip=0
	.macro ALLOC_PT_GPREGS_ON_STACK
	addq	$-(15*8+\addskip), %rsp
	addq	$-(15*8), %rsp
	.endm
	.endm


	.macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
	.macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
@@ -147,15 +147,6 @@ For 32-bit we have the following conventions - kernel is built with
	movq 5*8+\offset(%rsp), %rbx
	movq 5*8+\offset(%rsp), %rbx
	.endm
	.endm


	.macro ZERO_EXTRA_REGS
	xorl	%r15d, %r15d
	xorl	%r14d, %r14d
	xorl	%r13d, %r13d
	xorl	%r12d, %r12d
	xorl	%ebp, %ebp
	xorl	%ebx, %ebx
	.endm

	.macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
	.macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
	.if \rstor_r11
	.if \rstor_r11
	movq 6*8(%rsp), %r11
	movq 6*8(%rsp), %r11
@@ -201,6 +192,26 @@ For 32-bit we have the following conventions - kernel is built with
	.byte 0xf1
	.byte 0xf1
	.endm
	.endm


/*
 * This is a sneaky trick to help the unwinder find pt_regs on the stack.  The
 * frame pointer is replaced with an encoded pointer to pt_regs.  The encoding
 * is just setting the LSB, which makes it an invalid stack address and is also
 * a signal to the unwinder that it's a pt_regs pointer in disguise.
 *
 * NOTE: This macro must be used *after* SAVE_EXTRA_REGS because it corrupts
 * the original rbp.
 */
.macro ENCODE_FRAME_POINTER ptregs_offset=0
#ifdef CONFIG_FRAME_POINTER
	.if \ptregs_offset
		leaq \ptregs_offset(%rsp), %rbp
	.else
		mov %rsp, %rbp
	.endif
	orq	$0x1, %rbp
#endif
.endm

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_X86_64 */


/*
/*
+93 −48
Original line number Original line Diff line number Diff line
@@ -45,6 +45,7 @@
#include <asm/asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/smap.h>
#include <asm/export.h>
#include <asm/export.h>
#include <asm/frame.h>


	.section .entry.text, "ax"
	.section .entry.text, "ax"


@@ -175,6 +176,22 @@
	SET_KERNEL_GS %edx
	SET_KERNEL_GS %edx
.endm
.endm


/*
 * This is a sneaky trick to help the unwinder find pt_regs on the stack.  The
 * frame pointer is replaced with an encoded pointer to pt_regs.  The encoding
 * is just setting the LSB, which makes it an invalid stack address and is also
 * a signal to the unwinder that it's a pt_regs pointer in disguise.
 *
 * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
 * original rbp.
 */
.macro ENCODE_FRAME_POINTER
#ifdef CONFIG_FRAME_POINTER
	mov %esp, %ebp
	orl $0x1, %ebp
#endif
.endm

.macro RESTORE_INT_REGS
.macro RESTORE_INT_REGS
	popl	%ebx
	popl	%ebx
	popl	%ecx
	popl	%ecx
@@ -237,6 +254,23 @@ ENTRY(__switch_to_asm)
	jmp	__switch_to
	jmp	__switch_to
END(__switch_to_asm)
END(__switch_to_asm)


/*
 * The unwinder expects the last frame on the stack to always be at the same
 * offset from the end of the page, which allows it to validate the stack.
 * Calling schedule_tail() directly would break that convention because its an
 * asmlinkage function so its argument has to be pushed on the stack.  This
 * wrapper creates a proper "end of stack" frame header before the call.
 */
ENTRY(schedule_tail_wrapper)
	FRAME_BEGIN

	pushl	%eax
	call	schedule_tail
	popl	%eax

	FRAME_END
	ret
ENDPROC(schedule_tail_wrapper)
/*
/*
 * A newly forked process directly context switches into this address.
 * A newly forked process directly context switches into this address.
 *
 *
@@ -245,9 +279,7 @@ END(__switch_to_asm)
 * edi: kernel thread arg
 * edi: kernel thread arg
 */
 */
ENTRY(ret_from_fork)
ENTRY(ret_from_fork)
	pushl	%eax
	call	schedule_tail_wrapper
	call	schedule_tail
	popl	%eax


	testl	%ebx, %ebx
	testl	%ebx, %ebx
	jnz	1f		/* kernel threads are uncommon */
	jnz	1f		/* kernel threads are uncommon */
@@ -307,13 +339,13 @@ END(ret_from_exception)
#ifdef CONFIG_PREEMPT
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
	DISABLE_INTERRUPTS(CLBR_ANY)
need_resched:
.Lneed_resched:
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	restore_all
	jnz	restore_all
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz	restore_all
	jz	restore_all
	call	preempt_schedule_irq
	call	preempt_schedule_irq
	jmp	need_resched
	jmp	.Lneed_resched
END(resume_kernel)
END(resume_kernel)
#endif
#endif


@@ -334,7 +366,7 @@ GLOBAL(__begin_SYSENTER_singlestep_region)
 */
 */
ENTRY(xen_sysenter_target)
ENTRY(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	sysenter_past_esp
	jmp	.Lsysenter_past_esp
#endif
#endif


/*
/*
@@ -371,7 +403,7 @@ ENTRY(xen_sysenter_target)
 */
 */
ENTRY(entry_SYSENTER_32)
ENTRY(entry_SYSENTER_32)
	movl	TSS_sysenter_sp0(%esp), %esp
	movl	TSS_sysenter_sp0(%esp), %esp
sysenter_past_esp:
.Lsysenter_past_esp:
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
	pushfl				/* pt_regs->flags (except IF = 0) */
	pushfl				/* pt_regs->flags (except IF = 0) */
@@ -504,9 +536,9 @@ ENTRY(entry_INT80_32)


restore_all:
restore_all:
	TRACE_IRQS_IRET
	TRACE_IRQS_IRET
restore_all_notrace:
.Lrestore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
#ifdef CONFIG_X86_ESPFIX32
	ALTERNATIVE	"jmp restore_nocheck", "", X86_BUG_ESPFIX
	ALTERNATIVE	"jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX


	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	/*
@@ -518,22 +550,23 @@ restore_all_notrace:
	movb	PT_CS(%esp), %al
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	je ldt_ss				# returning to user-space with LDT SS
	je .Lldt_ss				# returning to user-space with LDT SS
#endif
#endif
restore_nocheck:
.Lrestore_nocheck:
	RESTORE_REGS 4				# skip orig_eax/error_code
	RESTORE_REGS 4				# skip orig_eax/error_code
irq_return:
.Lirq_return:
	INTERRUPT_RETURN
	INTERRUPT_RETURN

.section .fixup, "ax"
.section .fixup, "ax"
ENTRY(iret_exc	)
ENTRY(iret_exc	)
	pushl	$0				# no error code
	pushl	$0				# no error code
	pushl	$do_iret_error
	pushl	$do_iret_error
	jmp	error_code
	jmp	common_exception
.previous
.previous
	_ASM_EXTABLE(irq_return, iret_exc)
	_ASM_EXTABLE(.Lirq_return, iret_exc)


#ifdef CONFIG_X86_ESPFIX32
#ifdef CONFIG_X86_ESPFIX32
ldt_ss:
.Lldt_ss:
/*
/*
 * Setup and switch to ESPFIX stack
 * Setup and switch to ESPFIX stack
 *
 *
@@ -562,7 +595,7 @@ ldt_ss:
	 */
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss	(%esp), %esp			/* switch to espfix segment */
	lss	(%esp), %esp			/* switch to espfix segment */
	jmp	restore_nocheck
	jmp	.Lrestore_nocheck
#endif
#endif
ENDPROC(entry_INT80_32)
ENDPROC(entry_INT80_32)


@@ -624,6 +657,7 @@ common_interrupt:
	ASM_CLAC
	ASM_CLAC
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
	SAVE_ALL
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	TRACE_IRQS_OFF
	movl	%esp, %eax
	movl	%esp, %eax
	call	do_IRQ
	call	do_IRQ
@@ -635,6 +669,7 @@ ENTRY(name) \
	ASM_CLAC;			\
	ASM_CLAC;			\
	pushl	$~(nr);			\
	pushl	$~(nr);			\
	SAVE_ALL;			\
	SAVE_ALL;			\
	ENCODE_FRAME_POINTER;		\
	TRACE_IRQS_OFF			\
	TRACE_IRQS_OFF			\
	movl	%esp, %eax;		\
	movl	%esp, %eax;		\
	call	fn;			\
	call	fn;			\
@@ -659,7 +694,7 @@ ENTRY(coprocessor_error)
	ASM_CLAC
	ASM_CLAC
	pushl	$0
	pushl	$0
	pushl	$do_coprocessor_error
	pushl	$do_coprocessor_error
	jmp	error_code
	jmp	common_exception
END(coprocessor_error)
END(coprocessor_error)


ENTRY(simd_coprocessor_error)
ENTRY(simd_coprocessor_error)
@@ -673,14 +708,14 @@ ENTRY(simd_coprocessor_error)
#else
#else
	pushl	$do_simd_coprocessor_error
	pushl	$do_simd_coprocessor_error
#endif
#endif
	jmp	error_code
	jmp	common_exception
END(simd_coprocessor_error)
END(simd_coprocessor_error)


ENTRY(device_not_available)
ENTRY(device_not_available)
	ASM_CLAC
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
	pushl	$do_device_not_available
	jmp	error_code
	jmp	common_exception
END(device_not_available)
END(device_not_available)


#ifdef CONFIG_PARAVIRT
#ifdef CONFIG_PARAVIRT
@@ -694,59 +729,59 @@ ENTRY(overflow)
	ASM_CLAC
	ASM_CLAC
	pushl	$0
	pushl	$0
	pushl	$do_overflow
	pushl	$do_overflow
	jmp	error_code
	jmp	common_exception
END(overflow)
END(overflow)


ENTRY(bounds)
ENTRY(bounds)
	ASM_CLAC
	ASM_CLAC
	pushl	$0
	pushl	$0
	pushl	$do_bounds
	pushl	$do_bounds
	jmp	error_code
	jmp	common_exception
END(bounds)
END(bounds)


ENTRY(invalid_op)
ENTRY(invalid_op)
	ASM_CLAC
	ASM_CLAC
	pushl	$0
	pushl	$0
	pushl	$do_invalid_op
	pushl	$do_invalid_op
	jmp	error_code
	jmp	common_exception
END(invalid_op)
END(invalid_op)


ENTRY(coprocessor_segment_overrun)
ENTRY(coprocessor_segment_overrun)
	ASM_CLAC
	ASM_CLAC
	pushl	$0
	pushl	$0
	pushl	$do_coprocessor_segment_overrun
	pushl	$do_coprocessor_segment_overrun
	jmp	error_code
	jmp	common_exception
END(coprocessor_segment_overrun)
END(coprocessor_segment_overrun)


ENTRY(invalid_TSS)
ENTRY(invalid_TSS)
	ASM_CLAC
	ASM_CLAC
	pushl	$do_invalid_TSS
	pushl	$do_invalid_TSS
	jmp	error_code
	jmp	common_exception
END(invalid_TSS)
END(invalid_TSS)


ENTRY(segment_not_present)
ENTRY(segment_not_present)
	ASM_CLAC
	ASM_CLAC
	pushl	$do_segment_not_present
	pushl	$do_segment_not_present
	jmp	error_code
	jmp	common_exception
END(segment_not_present)
END(segment_not_present)


ENTRY(stack_segment)
ENTRY(stack_segment)
	ASM_CLAC
	ASM_CLAC
	pushl	$do_stack_segment
	pushl	$do_stack_segment
	jmp	error_code
	jmp	common_exception
END(stack_segment)
END(stack_segment)


ENTRY(alignment_check)
ENTRY(alignment_check)
	ASM_CLAC
	ASM_CLAC
	pushl	$do_alignment_check
	pushl	$do_alignment_check
	jmp	error_code
	jmp	common_exception
END(alignment_check)
END(alignment_check)


ENTRY(divide_error)
ENTRY(divide_error)
	ASM_CLAC
	ASM_CLAC
	pushl	$0				# no error code
	pushl	$0				# no error code
	pushl	$do_divide_error
	pushl	$do_divide_error
	jmp	error_code
	jmp	common_exception
END(divide_error)
END(divide_error)


#ifdef CONFIG_X86_MCE
#ifdef CONFIG_X86_MCE
@@ -754,7 +789,7 @@ ENTRY(machine_check)
	ASM_CLAC
	ASM_CLAC
	pushl	$0
	pushl	$0
	pushl	machine_check_vector
	pushl	machine_check_vector
	jmp	error_code
	jmp	common_exception
END(machine_check)
END(machine_check)
#endif
#endif


@@ -762,13 +797,14 @@ ENTRY(spurious_interrupt_bug)
	ASM_CLAC
	ASM_CLAC
	pushl	$0
	pushl	$0
	pushl	$do_spurious_interrupt_bug
	pushl	$do_spurious_interrupt_bug
	jmp	error_code
	jmp	common_exception
END(spurious_interrupt_bug)
END(spurious_interrupt_bug)


#ifdef CONFIG_XEN
#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
ENTRY(xen_hypervisor_callback)
	pushl	$-1				/* orig_ax = -1 => not a system call */
	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	TRACE_IRQS_OFF


	/*
	/*
@@ -823,6 +859,7 @@ ENTRY(xen_failsafe_callback)
	jmp	iret_exc
	jmp	iret_exc
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	SAVE_ALL
	ENCODE_FRAME_POINTER
	jmp	ret_from_exception
	jmp	ret_from_exception


.section .fixup, "ax"
.section .fixup, "ax"
@@ -882,7 +919,7 @@ ftrace_call:
	popl	%edx
	popl	%edx
	popl	%ecx
	popl	%ecx
	popl	%eax
	popl	%eax
ftrace_ret:
.Lftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
.globl ftrace_graph_call
ftrace_graph_call:
ftrace_graph_call:
@@ -952,7 +989,7 @@ GLOBAL(ftrace_regs_call)
	popl	%gs
	popl	%gs
	addl	$8, %esp			/* Skip orig_ax and ip */
	addl	$8, %esp			/* Skip orig_ax and ip */
	popf					/* Pop flags at end (no addl to corrupt flags) */
	popf					/* Pop flags at end (no addl to corrupt flags) */
	jmp	ftrace_ret
	jmp	.Lftrace_ret


	popf
	popf
	jmp	ftrace_stub
	jmp	ftrace_stub
@@ -963,7 +1000,7 @@ ENTRY(mcount)
	jb	ftrace_stub			/* Paging not enabled yet? */
	jb	ftrace_stub			/* Paging not enabled yet? */


	cmpl	$ftrace_stub, ftrace_trace_function
	cmpl	$ftrace_stub, ftrace_trace_function
	jnz	trace
	jnz	.Ltrace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl	$ftrace_stub, ftrace_graph_return
	cmpl	$ftrace_stub, ftrace_graph_return
	jnz	ftrace_graph_caller
	jnz	ftrace_graph_caller
@@ -976,7 +1013,7 @@ ftrace_stub:
	ret
	ret


	/* taken from glibc */
	/* taken from glibc */
trace:
.Ltrace:
	pushl	%eax
	pushl	%eax
	pushl	%ecx
	pushl	%ecx
	pushl	%edx
	pushl	%edx
@@ -1027,7 +1064,7 @@ return_to_handler:
ENTRY(trace_page_fault)
ENTRY(trace_page_fault)
	ASM_CLAC
	ASM_CLAC
	pushl	$trace_do_page_fault
	pushl	$trace_do_page_fault
	jmp	error_code
	jmp	common_exception
END(trace_page_fault)
END(trace_page_fault)
#endif
#endif


@@ -1035,7 +1072,10 @@ ENTRY(page_fault)
	ASM_CLAC
	ASM_CLAC
	pushl	$do_page_fault
	pushl	$do_page_fault
	ALIGN
	ALIGN
error_code:
	jmp common_exception
END(page_fault)

common_exception:
	/* the function address is in %gs's slot on the stack */
	/* the function address is in %gs's slot on the stack */
	pushl	%fs
	pushl	%fs
	pushl	%es
	pushl	%es
@@ -1047,6 +1087,7 @@ error_code:
	pushl	%edx
	pushl	%edx
	pushl	%ecx
	pushl	%ecx
	pushl	%ebx
	pushl	%ebx
	ENCODE_FRAME_POINTER
	cld
	cld
	movl	$(__KERNEL_PERCPU), %ecx
	movl	$(__KERNEL_PERCPU), %ecx
	movl	%ecx, %fs
	movl	%ecx, %fs
@@ -1064,7 +1105,7 @@ error_code:
	movl	%esp, %eax			# pt_regs pointer
	movl	%esp, %eax			# pt_regs pointer
	call	*%edi
	call	*%edi
	jmp	ret_from_exception
	jmp	ret_from_exception
END(page_fault)
END(common_exception)


ENTRY(debug)
ENTRY(debug)
	/*
	/*
@@ -1079,6 +1120,7 @@ ENTRY(debug)
	ASM_CLAC
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$-1				# mark this as an int
	SAVE_ALL
	SAVE_ALL
	ENCODE_FRAME_POINTER
	xorl	%edx, %edx			# error code 0
	xorl	%edx, %edx			# error code 0
	movl	%esp, %eax			# pt_regs pointer
	movl	%esp, %eax			# pt_regs pointer


@@ -1094,11 +1136,11 @@ ENTRY(debug)


.Ldebug_from_sysenter_stack:
.Ldebug_from_sysenter_stack:
	/* We're on the SYSENTER stack.  Switch off. */
	/* We're on the SYSENTER stack.  Switch off. */
	movl	%esp, %ebp
	movl	%esp, %ebx
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	TRACE_IRQS_OFF
	TRACE_IRQS_OFF
	call	do_debug
	call	do_debug
	movl	%ebp, %esp
	movl	%ebx, %esp
	jmp	ret_from_exception
	jmp	ret_from_exception
END(debug)
END(debug)


@@ -1116,11 +1158,12 @@ ENTRY(nmi)
	movl	%ss, %eax
	movl	%ss, %eax
	cmpw	$__ESPFIX_SS, %ax
	cmpw	$__ESPFIX_SS, %ax
	popl	%eax
	popl	%eax
	je	nmi_espfix_stack
	je	.Lnmi_espfix_stack
#endif
#endif


	pushl	%eax				# pt_regs->orig_ax
	pushl	%eax				# pt_regs->orig_ax
	SAVE_ALL
	SAVE_ALL
	ENCODE_FRAME_POINTER
	xorl	%edx, %edx			# zero error code
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	movl	%esp, %eax			# pt_regs pointer


@@ -1132,21 +1175,21 @@ ENTRY(nmi)


	/* Not on SYSENTER stack. */
	/* Not on SYSENTER stack. */
	call	do_nmi
	call	do_nmi
	jmp	restore_all_notrace
	jmp	.Lrestore_all_notrace


.Lnmi_from_sysenter_stack:
.Lnmi_from_sysenter_stack:
	/*
	/*
	 * We're on the SYSENTER stack.  Switch off.  No one (not even debug)
	 * We're on the SYSENTER stack.  Switch off.  No one (not even debug)
	 * is using the thread stack right now, so it's safe for us to use it.
	 * is using the thread stack right now, so it's safe for us to use it.
	 */
	 */
	movl	%esp, %ebp
	movl	%esp, %ebx
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	call	do_nmi
	call	do_nmi
	movl	%ebp, %esp
	movl	%ebx, %esp
	jmp	restore_all_notrace
	jmp	.Lrestore_all_notrace


#ifdef CONFIG_X86_ESPFIX32
#ifdef CONFIG_X86_ESPFIX32
nmi_espfix_stack:
.Lnmi_espfix_stack:
	/*
	/*
	 * create the pointer to lss back
	 * create the pointer to lss back
	 */
	 */
@@ -1159,12 +1202,13 @@ nmi_espfix_stack:
	.endr
	.endr
	pushl	%eax
	pushl	%eax
	SAVE_ALL
	SAVE_ALL
	ENCODE_FRAME_POINTER
	FIXUP_ESPFIX_STACK			# %eax == %esp
	FIXUP_ESPFIX_STACK			# %eax == %esp
	xorl	%edx, %edx			# zero error code
	xorl	%edx, %edx			# zero error code
	call	do_nmi
	call	do_nmi
	RESTORE_REGS
	RESTORE_REGS
	lss	12+4(%esp), %esp		# back to espfix stack
	lss	12+4(%esp), %esp		# back to espfix stack
	jmp	irq_return
	jmp	.Lirq_return
#endif
#endif
END(nmi)
END(nmi)


@@ -1172,6 +1216,7 @@ ENTRY(int3)
	ASM_CLAC
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$-1				# mark this as an int
	SAVE_ALL
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# zero error code
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	movl	%esp, %eax			# pt_regs pointer
@@ -1181,14 +1226,14 @@ END(int3)


ENTRY(general_protection)
ENTRY(general_protection)
	pushl	$do_general_protection
	pushl	$do_general_protection
	jmp	error_code
	jmp	common_exception
END(general_protection)
END(general_protection)


#ifdef CONFIG_KVM_GUEST
#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
ENTRY(async_page_fault)
	ASM_CLAC
	ASM_CLAC
	pushl	$do_async_page_fault
	pushl	$do_async_page_fault
	jmp	error_code
	jmp	common_exception
END(async_page_fault)
END(async_page_fault)
#endif
#endif


Loading