
Commit 929bacec authored by Andy Lutomirski, committed by Ingo Molnar

x86/entry/64: De-Xen-ify our NMI code

Xen PV is fundamentally incompatible with our fancy NMI code: it
doesn't use IST at all, and Xen entries clobber two stack slots
below the hardware frame.

Drop Xen PV support from our NMI code entirely.
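
For context, the layout that makes this fragile: the NMI code keeps its bookkeeping (a scratch slot for %rdx and the "NMI executing" variable) in the two stack slots directly below the hardware iret frame, which is exactly the region a Xen PV entry would clobber. Below is a rough C model of that layout, reconstructed from the comments in entry_64.S; the struct and field names are illustrative, not kernel identifiers.

/*
 * Illustrative model of the NMI IST stack layout (not kernel code).
 * First member = lowest address; the CPU-pushed frame sits highest.
 */
#include <stdint.h>

struct hw_iret_frame {			/* pushed by the CPU on NMI delivery */
	uint64_t rip, cs, rflags, rsp, ss;
};

struct nmi_stack_model {
	struct hw_iret_frame outermost;	/* stable copy, set up in first_nmi */
	struct hw_iret_frame iret;	/* re-copied on each nested iteration */
	uint64_t nmi_executing;		/* the "NMI executing" variable */
	uint64_t saved_rdx;		/* temp storage for %rdx */
	struct hw_iret_frame hw;	/* the frame the hardware pushed */
	/*
	 * saved_rdx and nmi_executing are the two slots "below the
	 * hardware frame" that Xen entries clobber, so the bookkeeping
	 * cannot survive a Xen PV entry.
	 */
};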

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Borislav Petkov <bp@suse.de>
Acked-by: Juergen Gross <jgross@suse.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/bfbe711b5ae03f672f8848999a8eb2711efc7f98.1509609304.git.luto@kernel.org

Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 43e41110
arch/x86/entry/entry_64.S: +18 −12
@@ -1240,9 +1240,13 @@ ENTRY(error_exit)
 	jmp	retint_user
 END(error_exit)
 
-/* Runs on exception stack */
+/*
+ * Runs on exception stack.  Xen PV does not go through this path at all,
+ * so we can use real assembly here.
+ */
 ENTRY(nmi)
 	UNWIND_HINT_IRET_REGS
+
 	/*
 	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
 	 * the iretq it performs will take us out of NMI context.
@@ -1300,7 +1304,7 @@ ENTRY(nmi)
 	 * stacks lest we corrupt the "NMI executing" variable.
 	 */
 
-	SWAPGS_UNSAFE_STACK
+	swapgs
 	cld
 	movq	%rsp, %rdx
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
@@ -1465,7 +1469,7 @@ nested_nmi_out:
 	popq	%rdx
 
 	/* We are returning to kernel mode, so this cannot result in a fault. */
-	INTERRUPT_RETURN
+	iretq
 
 first_nmi:
 	/* Restore rdx. */
@@ -1496,7 +1500,7 @@ first_nmi:
 	pushfq			/* RFLAGS */
 	pushq	$__KERNEL_CS	/* CS */
 	pushq	$1f		/* RIP */
-	INTERRUPT_RETURN	/* continues at repeat_nmi below */
+	iretq			/* continues at repeat_nmi below */
 	UNWIND_HINT_IRET_REGS
 1:
 #endif
@@ -1571,20 +1575,22 @@ nmi_restore:
 	/*
 	 * Clear "NMI executing".  Set DF first so that we can easily
 	 * distinguish the remaining code between here and IRET from
-	 * the SYSCALL entry and exit paths.  On a native kernel, we
-	 * could just inspect RIP, but, on paravirt kernels,
-	 * INTERRUPT_RETURN can translate into a jump into a
-	 * hypercall page.
+	 * the SYSCALL entry and exit paths.
+	 *
+	 * We arguably should just inspect RIP instead, but I (Andy) wrote
+	 * this code when I had the misapprehension that Xen PV supported
+	 * NMIs, and Xen PV would break that approach.
 	 */
 	std
 	movq	$0, 5*8(%rsp)		/* clear "NMI executing" */
 
 	/*
-	 * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
-	 * stack in a single instruction.  We are returning to kernel
-	 * mode, so this cannot result in a fault.
+	 * iretq reads the "iret" frame and exits the NMI stack in a
+	 * single instruction.  We are returning to kernel mode, so this
+	 * cannot result in a fault.  Similarly, we don't need to worry
+	 * about espfix64 on the way back to kernel mode.
 	 */
-	INTERRUPT_RETURN
+	iretq
 END(nmi)
 
 ENTRY(ignore_sysret)
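
Two background notes on the substitutions above. On a native kernel, SWAPGS_UNSAFE_STACK and INTERRUPT_RETURN already reduce to a plain swapgs and an iret path; the macros exist as paravirt patch points, and INTERRUPT_RETURN in particular can become a jump into the hypervisor's hypercall page, which is what the old comment worried about. The DF trick the rewritten comment describes works because SYSCALL is programmed to mask DF on entry, so a nested NMI that finds the interrupted RSP inside the NMI stack can use the saved RFLAGS.DF to tell the outer NMI's exit window apart from a SYSCALL landing point where userspace still controlled RSP. A conceptual C model of that check follows; it is not the kernel's actual code, and the function name is made up for illustration.

#include <stdbool.h>
#include <stdint.h>

#define X86_EFLAGS_DF	(1UL << 10)	/* direction flag, bit 10 of RFLAGS */

/*
 * Conceptual model of the nested-NMI disambiguation.  The outer NMI
 * executes "std" before clearing "NMI executing", and userspace cannot
 * enter the kernel via SYSCALL with DF set (SYSCALL masks DF), so a set
 * DF in the interrupted RFLAGS identifies the std ... iretq exit window.
 */
static bool in_outer_nmi_exit_window(uint64_t saved_rflags)
{
	return (saved_rflags & X86_EFLAGS_DF) != 0;
}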