
Commit e24b90b2 authored by Ingo Molnar


Merge branch 'tip/x86/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into x86/asm
parents 458ce291 79fb4ad6
+35 −29
@@ -1529,6 +1529,7 @@ ENTRY(nmi)
 
 	/* Use %rdx as our temp variable throughout */
 	pushq_cfi %rdx
+	CFI_REL_OFFSET rdx, 0
 
 	/*
 	 * If %cs was not the kernel segment, then the NMI triggered in user
@@ -1553,6 +1554,7 @@ ENTRY(nmi)
 	 */
 	lea 6*8(%rsp), %rdx
 	test_in_nmi rdx, 4*8(%rsp), nested_nmi, first_nmi
+	CFI_REMEMBER_STATE
 
 nested_nmi:
 	/*
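Note on the nesting check around test_in_nmi above: it combines three tests. An NMI that interrupted user space (%cs != __KERNEL_CS) can never be nested; otherwise the on-stack "NMI executing" variable decides; and test_in_nmi double-checks whether the interrupted stack pointer was inside the NMI stack, covering the window where the first NMI has cleared the variable but not yet finished its iret. A rough userspace C model of that decision; the KERNEL_CS value, the function name and the flat argument list are illustrative, not the kernel's API:

#include <stdbool.h>
#include <stdint.h>

#define KERNEL_CS 0x10	/* stand-in for __KERNEL_CS; the value is illustrative */

/*
 * Model of the nested-NMI decision at NMI entry.  saved_cs and saved_rsp
 * come from the iret frame the CPU pushed; nmi_executing is the special
 * stack variable; the last two arguments describe the NMI stack, which
 * the assembly derives from "lea 6*8(%rsp), %rdx" and EXCEPTION_STKSZ.
 */
static bool nmi_is_nested(uint16_t saved_cs, uint64_t saved_rsp,
			  uint64_t nmi_executing,
			  uint64_t nmi_stack_top, uint64_t nmi_stack_size)
{
	/* An NMI that came from user space is definitely not nested. */
	if (saved_cs != KERNEL_CS)
		return false;

	/* The first NMI sets this variable on its stack before real work. */
	if (nmi_executing == 1)
		return true;

	/*
	 * test_in_nmi: was the interrupted RSP within the NMI stack?
	 * Needed because the first NMI may have cleared the variable
	 * already while still being on its way out through iret.
	 */
	return saved_rsp <= nmi_stack_top &&
	       saved_rsp > nmi_stack_top - nmi_stack_size;
}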
@@ -1584,10 +1586,12 @@ nested_nmi:
 
 nested_nmi_out:
 	popq_cfi %rdx
+	CFI_RESTORE rdx
 
 	/* No need to check faults here */
 	INTERRUPT_RETURN
 
+	CFI_RESTORE_STATE
first_nmi:
 	/*
 	 * Because nested NMIs will use the pushed location that we
@@ -1619,10 +1623,15 @@ first_nmi:
 	 * | pt_regs                 |
 	 * +-------------------------+
 	 *
-	 * The saved RIP is used to fix up the copied RIP that a nested
-	 * NMI may zero out. The original stack frame and the temp storage
+	 * The saved stack frame is used to fix up the copied stack frame
+	 * that a nested NMI may change to make the interrupted NMI iret jump
+	 * to the repeat_nmi. The original stack frame and the temp storage
 	 * are also used by nested NMIs and can not be trusted on exit.
 	 */
+	/* Do not pop rdx, nested NMIs will corrupt that part of the stack */
+	movq (%rsp), %rdx
+	CFI_RESTORE rdx
+
 	/* Set the NMI executing variable on the stack. */
 	pushq_cfi $1
 
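Note: the diagram in the comment above describes three five-word iret frames stacked on top of each other, plus two scratch slots. A compact C sketch of that layout and of what the first_nmi path establishes; field order follows the comment from the top of the stack down rather than real stack addresses, and every name here is illustrative:

#include <stdint.h>

/* The five-word frame the CPU pushes on interrupt entry. */
struct iret_frame {
	uint64_t ss;
	uint64_t rsp;
	uint64_t rflags;
	uint64_t cs;
	uint64_t rip;
};

/* The NMI stack layout from the comment, highest address first. */
struct nmi_stack {
	struct iret_frame original;	/* pushed by the CPU; untrusted on exit */
	uint64_t temp_rdx;		/* temp storage for %rdx */
	uint64_t nmi_executing;		/* the "NMI executing" variable */
	struct iret_frame saved;	/* pristine backup, never modified */
	struct iret_frame copied;	/* working frame a nested NMI may rewrite */
	/* pt_regs follows below */
};

/* What first_nmi sets up before the handler body runs. */
static void first_nmi_setup(struct nmi_stack *s, uint64_t entry_rdx)
{
	s->temp_rdx = entry_rdx;	/* pushq_cfi %rdx at entry */
	s->nmi_executing = 1;		/* pushq_cfi $1 */
	s->saved = s->original;		/* .rept 5; pushq_cfi 6*8(%rsp); .endr */
	s->copied = s->saved;		/* second copy, the one repeat_nmi rebuilds */
}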
@@ -1630,22 +1639,39 @@ first_nmi:
 	.rept 5
 	pushq_cfi 6*8(%rsp)
 	.endr
+	CFI_DEF_CFA_OFFSET SS+8-RIP
 
 	/* Everything up to here is safe from nested NMIs */
+
+	/*
+	 * If there was a nested NMI, the first NMI's iret will return
+	 * here. But NMIs are still enabled and we can take another
+	 * nested NMI. The nested NMI checks the interrupted RIP to see
+	 * if it is between repeat_nmi and end_repeat_nmi, and if so
+	 * it will just return, as we are about to repeat an NMI anyway.
+	 * This makes it safe to copy to the stack frame that a nested
+	 * NMI will update.
+	 */
+repeat_nmi:
+	/*
+	 * Update the stack variable to say we are still in NMI (the update
+	 * is benign for the non-repeat case, where 1 was pushed just above
+	 * to this very stack slot).
+	 */
+	movq $1, 5*8(%rsp)
+
 	/* Make another copy, this one may be modified by nested NMIs */
 	.rept 5
 	pushq_cfi 4*8(%rsp)
 	.endr
-
-	/* Do not pop rdx, nested NMIs will corrupt it */
-	movq 11*8(%rsp), %rdx
+	CFI_DEF_CFA_OFFSET SS+8-RIP
+end_repeat_nmi:
+
 	/*
 	 * Everything below this point can be preempted by a nested
-	 * NMI if the first NMI took an exception. Repeated NMIs
-	 * caused by an exception and nested NMI will start here, and
-	 * can still be preempted by another NMI.
+	 * NMI if the first NMI took an exception and reset our iret stack
+	 * so that we repeat another NMI.
 	 */
-restart_nmi:
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
 	subq $ORIG_RAX-R15, %rsp
 	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
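Note: this hunk moves repeat_nmi inline (the old out-of-line copy is deleted in the next hunk). The recovery scheme, as the comments describe it: if the first NMI takes an exception or breakpoint whose iret re-enables NMIs, a nested NMI must not run the handler on the same stack; instead it rewrites only the "copied" iret frame so that the preempted NMI's final iret lands on repeat_nmi, which re-arms the flag and rebuilds the copied frame from the trusted saved frame before running the handler again. Continuing the illustrative C model from the previous note (it reuses those struct definitions and includes; repeat_nmi_entry stands in for the label's address):

extern const char repeat_nmi_entry[];	/* stand-in for the repeat_nmi label */

/*
 * What a nested NMI does when it must defer: redirect the preempted
 * NMI's eventual iret to repeat_nmi by touching only the copied frame,
 * then return immediately.
 */
static void nested_nmi_defer(struct nmi_stack *s)
{
	s->copied.rip = (uint64_t)repeat_nmi_entry;
	/* everything else is left alone; the first NMI is still running */
}

/*
 * repeat_nmi..end_repeat_nmi: re-arm the flag (benign when it is already
 * 1, as the comment notes) and rebuild the working copy from the backup.
 * A nested NMI arriving inside this window simply returns, because a
 * repeat is already guaranteed; that is what makes the copy safe.
 */
static void repeat_nmi_body(struct nmi_stack *s)
{
	s->nmi_executing = 1;	/* movq $1, 5*8(%rsp) */
	s->copied = s->saved;	/* .rept 5; pushq_cfi 4*8(%rsp); .endr */
	/* ...fall through into the NMI handler proper */
}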
@@ -1674,26 +1700,6 @@ nmi_restore:
 	CFI_ENDPROC
 END(nmi)
 
-	/*
-	 * If an NMI hit an iret because of an exception or breakpoint,
-	 * it can lose its NMI context, and a nested NMI may come in.
-	 * In that case, the nested NMI will change the preempted NMI's
-	 * stack to jump to here when it does the final iret.
-	 */
-repeat_nmi:
-	INTR_FRAME
-	/* Update the stack variable to say we are still in NMI */
-	movq $1, 5*8(%rsp)
-
-	/* copy the saved stack back to copy stack */
-	.rept 5
-	pushq_cfi 4*8(%rsp)
-	.endr
-
-	jmp restart_nmi
-	CFI_ENDPROC
-end_repeat_nmi:
-
 ENTRY(ignore_sysret)
 	CFI_STARTPROC
 	mov $-ENOSYS,%eax