
Commit 5645688f authored by Linus Torvalds

Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 asm updates from Ingo Molnar:
 "The main changes in this development cycle were:

   - a large number of call stack dumping/printing improvements: higher
     robustness, better cross-context dumping, improved output, etc.
     (Josh Poimboeuf)

   - vDSO getcpu() performance improvement for future Intel CPUs with
     the RDPID instruction (Andy Lutomirski)

   - add two new Intel AVX512 features and the CPUID support
     infrastructure for them: AVX512IFMA and AVX512VBMI (Gayatri Kammela,
     He Chen)

   - more copy-user unification (Borislav Petkov)

   - entry code assembly macro simplifications (Alexander Kuleshov)

   - vDSO C/R support improvements (Dmitry Safonov)

   - misc fixes and cleanups (Borislav Petkov, Paul Bolle)"

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (40 commits)
  scripts/decode_stacktrace.sh: Fix address line detection on x86
  x86/boot/64: Use defines for page size
  x86/dumpstack: Make stack name tags more comprehensible
  selftests/x86: Add test_vdso to test getcpu()
  x86/vdso: Use RDPID in preference to LSL when available
  x86/dumpstack: Handle NULL stack pointer in show_trace_log_lvl()
  x86/cpufeatures: Enable new AVX512 cpu features
  x86/cpuid: Provide get_scattered_cpuid_leaf()
  x86/cpuid: Cleanup cpuid_regs definitions
  x86/copy_user: Unify the code by removing the 64-bit asm _copy_*_user() variants
  x86/unwind: Ensure stack grows down
  x86/vdso: Set vDSO pointer only after success
  x86/prctl/uapi: Remove #ifdef for CHECKPOINT_RESTORE
  x86/unwind: Detect bad stack return address
  x86/dumpstack: Warn on stack recursion
  x86/unwind: Warn on bad frame pointer
  x86/decoder: Use stderr if insn sanity test fails
  x86/decoder: Use stdout if insn decoder test is successful
  mm/page_alloc: Remove kernel address exposure in free_reserved_area()
  x86/dumpstack: Remove raw stack dump
  ...
parents 4ade5b22 53938ee4
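The RDPID change above ("x86/vdso: Use RDPID in preference to LSL when available") relies on the kernel already writing (node << 12) | cpu into each CPU's IA32_TSC_AUX MSR for RDTSCP; RDPID reads that MSR from user space in a single instruction, where older CPUs recover the same value from a per-CPU segment limit via LSL. A minimal user-space sketch of the idea, not the kernel's exact vDSO code, assuming a CPU with RDPID and a kernel that populates TSC_AUX this way:

#include <stdio.h>

static unsigned int read_cpu_and_node(void)
{
	unsigned int p;

	/* RDPID %eax -- emitted as raw bytes because older assemblers
	 * do not know the mnemonic yet. */
	asm volatile (".byte 0xf3, 0x0f, 0xc7, 0xf8" : "=a" (p));
	return p;
}

int main(void)
{
	unsigned int p = read_cpu_and_node();

	/* Same (node << 12) | cpu encoding that the LSL fallback
	 * recovers from the segment limit. */
	printf("cpu=%u node=%u\n", p & 0xfff, p >> 12);
	return 0;
}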
Documentation/kernel-parameters.txt +0 −3
@@ -1963,9 +1963,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			kmemcheck=2 (one-shot mode)
			Default: 2 (one-shot mode)

-	kstack=N	[X86] Print N words from the kernel stack
-			in oops dumps.
-
	kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
			Default is 0 (don't ignore, but inject #GP)

Documentation/sysctl/kernel.txt +0 −8
@@ -40,7 +40,6 @@ show up in /proc/sys/kernel:
- hung_task_warnings
- kexec_load_disabled
- kptr_restrict
-- kstack_depth_to_print       [ X86 only ]
- l2cr                        [ PPC only ]
- modprobe                    ==> Documentation/debugging-modules.txt
- modules_disabled
@@ -395,13 +394,6 @@ When kptr_restrict is set to (2), kernel pointers printed using

==============================================================

-kstack_depth_to_print: (X86 only)
-
-Controls the number of words to print when dumping the raw
-kernel stack.
-
-==============================================================
-
l2cr: (PPC only)

This flag controls the L2 cache of G3 processor boards. If
Documentation/x86/x86_64/boot-options.txt +0 −4
@@ -277,10 +277,6 @@ IOMMU (input/output memory management unit)
    space might stop working. Use this option if you have devices that
    are accessed from userspace directly on some PCI host bridge.

-Debugging
-
-  kstack=N	Print N words from the kernel stack in oops dumps.
-
Miscellaneous

	nogbpages
arch/x86/entry/calling.h +22 −11
@@ -90,8 +90,8 @@ For 32-bit we have the following conventions - kernel is built with

#define SIZEOF_PTREGS	21*8

-.macro ALLOC_PT_GPREGS_ON_STACK addskip=0
-	addq	$-(15*8+\addskip), %rsp
+.macro ALLOC_PT_GPREGS_ON_STACK
+	addq	$-(15*8), %rsp
	.endm

	.macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
@@ -147,15 +147,6 @@ For 32-bit we have the following conventions - kernel is built with
	movq 5*8+\offset(%rsp), %rbx
	.endm

-.macro ZERO_EXTRA_REGS
-	xorl	%r15d, %r15d
-	xorl	%r14d, %r14d
-	xorl	%r13d, %r13d
-	xorl	%r12d, %r12d
-	xorl	%ebp, %ebp
-	xorl	%ebx, %ebx
-.endm
-
	.macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
	.if \rstor_r11
	movq 6*8(%rsp), %r11
@@ -201,6 +192,26 @@ For 32-bit we have the following conventions - kernel is built with
	.byte 0xf1
	.endm

+/*
+ * This is a sneaky trick to help the unwinder find pt_regs on the stack.  The
+ * frame pointer is replaced with an encoded pointer to pt_regs.  The encoding
+ * is just setting the LSB, which makes it an invalid stack address and is also
+ * a signal to the unwinder that it's a pt_regs pointer in disguise.
+ *
+ * NOTE: This macro must be used *after* SAVE_EXTRA_REGS because it corrupts
+ * the original rbp.
+ */
+.macro ENCODE_FRAME_POINTER ptregs_offset=0
+#ifdef CONFIG_FRAME_POINTER
+	.if \ptregs_offset
+		leaq \ptregs_offset(%rsp), %rbp
+	.else
+		mov %rsp, %rbp
+	.endif
+	orq	$0x1, %rbp
+#endif
+.endm
+
#endif /* CONFIG_X86_64 */

/*
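The ENCODE_FRAME_POINTER comment above describes the encoding side; the unwinder's decoding side is just a bit test and a mask. A minimal C sketch with hypothetical helper names (the kernel's frame-pointer unwinder performs the equivalent check internally):

#include <stdbool.h>

struct pt_regs;	/* saved-register layout; the contents don't matter here */

/* A real stack address is word-aligned, so its LSB can never be 1.
 * Seeing the bit set tells the unwinder this "frame pointer" is really
 * a disguised pt_regs pointer; clearing the bit recovers the address. */
static bool frame_pointer_is_pt_regs(unsigned long bp)
{
	return bp & 0x1;
}

static struct pt_regs *decode_frame_pointer(unsigned long bp)
{
	return (struct pt_regs *)(bp & ~0x1UL);
}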
arch/x86/entry/entry_32.S +93 −48
@@ -45,6 +45,7 @@
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
+#include <asm/frame.h>

	.section .entry.text, "ax"

@@ -175,6 +176,22 @@
	SET_KERNEL_GS %edx
.endm

+/*
+ * This is a sneaky trick to help the unwinder find pt_regs on the stack.  The
+ * frame pointer is replaced with an encoded pointer to pt_regs.  The encoding
+ * is just setting the LSB, which makes it an invalid stack address and is also
+ * a signal to the unwinder that it's a pt_regs pointer in disguise.
+ *
+ * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
+ * original rbp.
+ */
+.macro ENCODE_FRAME_POINTER
+#ifdef CONFIG_FRAME_POINTER
+	mov %esp, %ebp
+	orl $0x1, %ebp
+#endif
+.endm
+
.macro RESTORE_INT_REGS
	popl	%ebx
	popl	%ecx
@@ -237,6 +254,23 @@ ENTRY(__switch_to_asm)
	jmp	__switch_to
END(__switch_to_asm)

+/*
+ * The unwinder expects the last frame on the stack to always be at the same
+ * offset from the end of the page, which allows it to validate the stack.
+ * Calling schedule_tail() directly would break that convention because it's an
+ * asmlinkage function so its argument has to be pushed on the stack.  This
+ * wrapper creates a proper "end of stack" frame header before the call.
+ */
+ENTRY(schedule_tail_wrapper)
+	FRAME_BEGIN
+
+	pushl	%eax
+	call	schedule_tail
+	popl	%eax
+
+	FRAME_END
+	ret
+ENDPROC(schedule_tail_wrapper)
/*
 * A newly forked process directly context switches into this address.
 *
@@ -245,9 +279,7 @@ END(__switch_to_asm)
 * edi: kernel thread arg
 */
ENTRY(ret_from_fork)
-	pushl	%eax
-	call	schedule_tail
-	popl	%eax
+	call	schedule_tail_wrapper

	testl	%ebx, %ebx
	jnz	1f		/* kernel threads are uncommon */
@@ -307,13 +339,13 @@ END(ret_from_exception)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
-need_resched:
+.Lneed_resched:
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	restore_all
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz	restore_all
	call	preempt_schedule_irq
-	jmp	need_resched
+	jmp	.Lneed_resched
END(resume_kernel)
#endif

@@ -334,7 +366,7 @@ GLOBAL(__begin_SYSENTER_singlestep_region)
 */
ENTRY(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
-	jmp	sysenter_past_esp
+	jmp	.Lsysenter_past_esp
#endif

/*
@@ -371,7 +403,7 @@ ENTRY(xen_sysenter_target)
 */
ENTRY(entry_SYSENTER_32)
	movl	TSS_sysenter_sp0(%esp), %esp
-sysenter_past_esp:
+.Lsysenter_past_esp:
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
	pushfl				/* pt_regs->flags (except IF = 0) */
@@ -504,9 +536,9 @@ ENTRY(entry_INT80_32)

restore_all:
	TRACE_IRQS_IRET
-restore_all_notrace:
+.Lrestore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
-	ALTERNATIVE	"jmp restore_nocheck", "", X86_BUG_ESPFIX
+	ALTERNATIVE	"jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
@@ -518,22 +550,23 @@ restore_all_notrace:
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
-	je ldt_ss				# returning to user-space with LDT SS
+	je .Lldt_ss				# returning to user-space with LDT SS
#endif
-restore_nocheck:
+.Lrestore_nocheck:
	RESTORE_REGS 4				# skip orig_eax/error_code
-irq_return:
+.Lirq_return:
	INTERRUPT_RETURN

.section .fixup, "ax"
ENTRY(iret_exc	)
	pushl	$0				# no error code
	pushl	$do_iret_error
-	jmp	error_code
+	jmp	common_exception
.previous
-	_ASM_EXTABLE(irq_return, iret_exc)
+	_ASM_EXTABLE(.Lirq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
-ldt_ss:
+.Lldt_ss:
/*
 * Setup and switch to ESPFIX stack
 *
@@ -562,7 +595,7 @@ ldt_ss:
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss	(%esp), %esp			/* switch to espfix segment */
-	jmp	restore_nocheck
+	jmp	.Lrestore_nocheck
#endif
ENDPROC(entry_INT80_32)

@@ -624,6 +657,7 @@ common_interrupt:
	ASM_CLAC
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
	SAVE_ALL
+	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	do_IRQ
@@ -635,6 +669,7 @@ ENTRY(name) \
	ASM_CLAC;			\
	pushl	$~(nr);			\
	SAVE_ALL;			\
+	ENCODE_FRAME_POINTER;		\
	TRACE_IRQS_OFF			\
	movl	%esp, %eax;		\
	call	fn;			\
@@ -659,7 +694,7 @@ ENTRY(coprocessor_error)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_error
-	jmp	error_code
+	jmp	common_exception
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
@@ -673,14 +708,14 @@ ENTRY(simd_coprocessor_error)
#else
	pushl	$do_simd_coprocessor_error
#endif
-	jmp	error_code
+	jmp	common_exception
END(simd_coprocessor_error)

ENTRY(device_not_available)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
-	jmp	error_code
+	jmp	common_exception
END(device_not_available)

#ifdef CONFIG_PARAVIRT
@@ -694,59 +729,59 @@ ENTRY(overflow)
	ASM_CLAC
	pushl	$0
	pushl	$do_overflow
-	jmp	error_code
+	jmp	common_exception
END(overflow)

ENTRY(bounds)
	ASM_CLAC
	pushl	$0
	pushl	$do_bounds
-	jmp	error_code
+	jmp	common_exception
END(bounds)

ENTRY(invalid_op)
	ASM_CLAC
	pushl	$0
	pushl	$do_invalid_op
-	jmp	error_code
+	jmp	common_exception
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_segment_overrun
-	jmp	error_code
+	jmp	common_exception
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	ASM_CLAC
	pushl	$do_invalid_TSS
-	jmp	error_code
+	jmp	common_exception
END(invalid_TSS)

ENTRY(segment_not_present)
	ASM_CLAC
	pushl	$do_segment_not_present
-	jmp	error_code
+	jmp	common_exception
END(segment_not_present)

ENTRY(stack_segment)
	ASM_CLAC
	pushl	$do_stack_segment
-	jmp	error_code
+	jmp	common_exception
END(stack_segment)

ENTRY(alignment_check)
	ASM_CLAC
	pushl	$do_alignment_check
-	jmp	error_code
+	jmp	common_exception
END(alignment_check)

ENTRY(divide_error)
	ASM_CLAC
	pushl	$0				# no error code
	pushl	$do_divide_error
-	jmp	error_code
+	jmp	common_exception
END(divide_error)

#ifdef CONFIG_X86_MCE
@@ -754,7 +789,7 @@ ENTRY(machine_check)
	ASM_CLAC
	pushl	$0
	pushl	machine_check_vector
-	jmp	error_code
+	jmp	common_exception
END(machine_check)
#endif

@@ -762,13 +797,14 @@ ENTRY(spurious_interrupt_bug)
	ASM_CLAC
	pushl	$0
	pushl	$do_spurious_interrupt_bug
-	jmp	error_code
+	jmp	common_exception
END(spurious_interrupt_bug)

#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
+	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF

	/*
@@ -823,6 +859,7 @@ ENTRY(xen_failsafe_callback)
	jmp	iret_exc
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
+	ENCODE_FRAME_POINTER
	jmp	ret_from_exception

.section .fixup, "ax"
@@ -882,7 +919,7 @@ ftrace_call:
	popl	%edx
	popl	%ecx
	popl	%eax
-ftrace_ret:
+.Lftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
@@ -952,7 +989,7 @@ GLOBAL(ftrace_regs_call)
	popl	%gs
	addl	$8, %esp			/* Skip orig_ax and ip */
	popf					/* Pop flags at end (no addl to corrupt flags) */
-	jmp	ftrace_ret
+	jmp	.Lftrace_ret

	popf
	jmp	ftrace_stub
@@ -963,7 +1000,7 @@ ENTRY(mcount)
	jb	ftrace_stub			/* Paging not enabled yet? */

	cmpl	$ftrace_stub, ftrace_trace_function
-	jnz	trace
+	jnz	.Ltrace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl	$ftrace_stub, ftrace_graph_return
	jnz	ftrace_graph_caller
@@ -976,7 +1013,7 @@ ftrace_stub:
	ret

	/* taken from glibc */
-trace:
+.Ltrace:
	pushl	%eax
	pushl	%ecx
	pushl	%edx
@@ -1027,7 +1064,7 @@ return_to_handler:
ENTRY(trace_page_fault)
	ASM_CLAC
	pushl	$trace_do_page_fault
-	jmp	error_code
+	jmp	common_exception
END(trace_page_fault)
#endif

@@ -1035,7 +1072,10 @@ ENTRY(page_fault)
	ASM_CLAC
	pushl	$do_page_fault
	ALIGN
-error_code:
+	jmp	common_exception
+END(page_fault)
+
+common_exception:
	/* the function address is in %gs's slot on the stack */
	pushl	%fs
	pushl	%es
@@ -1047,6 +1087,7 @@ error_code:
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
+	ENCODE_FRAME_POINTER
	cld
	movl	$(__KERNEL_PERCPU), %ecx
	movl	%ecx, %fs
@@ -1064,7 +1105,7 @@ error_code:
	movl	%esp, %eax			# pt_regs pointer
	call	*%edi
	jmp	ret_from_exception
-END(page_fault)
+END(common_exception)

ENTRY(debug)
	/*
@@ -1079,6 +1120,7 @@ ENTRY(debug)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	SAVE_ALL
+	ENCODE_FRAME_POINTER
	xorl	%edx, %edx			# error code 0
	movl	%esp, %eax			# pt_regs pointer

@@ -1094,11 +1136,11 @@ ENTRY(debug)

.Ldebug_from_sysenter_stack:
	/* We're on the SYSENTER stack.  Switch off. */
-	movl	%esp, %ebp
+	movl	%esp, %ebx
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	TRACE_IRQS_OFF
	call	do_debug
-	movl	%ebp, %esp
+	movl	%ebx, %esp
	jmp	ret_from_exception
END(debug)

@@ -1116,11 +1158,12 @@ ENTRY(nmi)
	movl	%ss, %eax
	cmpw	$__ESPFIX_SS, %ax
	popl	%eax
-	je	nmi_espfix_stack
+	je	.Lnmi_espfix_stack
#endif

	pushl	%eax				# pt_regs->orig_ax
	SAVE_ALL
+	ENCODE_FRAME_POINTER
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer

@@ -1132,21 +1175,21 @@ ENTRY(nmi)

	/* Not on SYSENTER stack. */
	call	do_nmi
-	jmp	restore_all_notrace
+	jmp	.Lrestore_all_notrace

.Lnmi_from_sysenter_stack:
	/*
	 * We're on the SYSENTER stack.  Switch off.  No one (not even debug)
	 * is using the thread stack right now, so it's safe for us to use it.
	 */
-	movl	%esp, %ebp
+	movl	%esp, %ebx
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	call	do_nmi
-	movl	%ebp, %esp
-	jmp	restore_all_notrace
+	movl	%ebx, %esp
+	jmp	.Lrestore_all_notrace

#ifdef CONFIG_X86_ESPFIX32
-nmi_espfix_stack:
+.Lnmi_espfix_stack:
	/*
	 * create the pointer to lss back
	 */
@@ -1159,12 +1202,13 @@ nmi_espfix_stack:
	.endr
	pushl	%eax
	SAVE_ALL
+	ENCODE_FRAME_POINTER
	FIXUP_ESPFIX_STACK			# %eax == %esp
	xorl	%edx, %edx			# zero error code
	call	do_nmi
	RESTORE_REGS
	lss	12+4(%esp), %esp		# back to espfix stack
-	jmp	irq_return
+	jmp	.Lirq_return
#endif
END(nmi)

@@ -1172,6 +1216,7 @@ ENTRY(int3)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	SAVE_ALL
+	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
@@ -1181,14 +1226,14 @@ END(int3)

ENTRY(general_protection)
	pushl	$do_general_protection
-	jmp	error_code
+	jmp	common_exception
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	ASM_CLAC
	pushl	$do_async_page_fault
-	jmp	error_code
+	jmp	common_exception
END(async_page_fault)
#endif
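Beyond the ENCODE_FRAME_POINTER additions, a recurring pattern in these hunks is renaming jump targets such as need_resched and irq_return to .Lneed_resched and .Lirq_return. GNU as treats symbols that start with .L as assembler-local and omits them from the object file's symbol table, so tools that symbolize kernel addresses (stack traces, objdump output) can no longer mistake such targets for function boundaries. A tiny stand-alone sketch with hypothetical label names:

	.text
	.globl	func
func:
	xorl	%eax, %eax
.Lloop:				# assembler-local: never emitted to the symbol table
	incl	%eax
	cmpl	$3, %eax
	jne	.Lloop
	ret
visible_label:			# by contrast, this local symbol does reach the symbol table
	ret

Running nm on the assembled object shows func and visible_label but no .Lloop.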
