
Commit f88cf230 authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fix from Peter Anvin:
 "A single fix to not invoke the espfix code on Xen PV, as it turns out
  to oops the guest when invoked after all.  This patch leaves some
  amount of dead code, in particular unnecessary initialization of the
  espfix stacks when they won't be used, but in the interest of keeping
  the patch minimal that cleanup can wait for the next cycle"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86_64/entry/xen: Do not invoke espfix64 on Xen
parents ecb679fc 7209a75d
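
The espfix64 path only matters when the kernel returns to a stack segment that lives in the LDT (the case where a 16-bit SS can leak kernel stack bits through iretq). In the diff below that case is gated by "testb $4,(SS-RIP)(%rsp)", i.e. the Table Indicator bit of the saved SS selector. A minimal user-space sketch of that selector check, illustrative only and not kernel code:

/*
 * Illustrative sketch only, not kernel code: bit 2 of a segment selector is
 * the Table Indicator (TI) bit, 0 = GDT, 1 = LDT.  The kernel's
 * "testb $4,(SS-RIP)(%rsp)" performs this check on the saved SS of the
 * exception frame.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool ss_refers_to_ldt(uint16_t ss_selector)
{
	return (ss_selector & 0x4) != 0;	/* TI bit set -> selector lives in the LDT */
}

int main(void)
{
	printf("SS 0x002b from LDT? %d\n", ss_refers_to_ldt(0x002b));	/* 0: ordinary GDT user SS */
	printf("SS 0x0007 from LDT? %d\n", ss_refers_to_ldt(0x0007));	/* 1: LDT selector, the espfix64 case */
	return 0;
}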
+1 −1
@@ -129,7 +129,7 @@ static inline notrace unsigned long arch_local_irq_save(void)
 
 #define PARAVIRT_ADJUST_EXCEPTION_FRAME	/*  */
 
-#define INTERRUPT_RETURN	iretq
+#define INTERRUPT_RETURN	jmp native_iret
 #define USERGS_SYSRET64				\
 	swapgs;					\
 	sysretq;
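
With the hunk above, a non-paravirt kernel's INTERRUPT_RETURN no longer expands to a bare iretq but jumps to native_iret, so the espfix64 check always runs on bare metal, while a Xen PV guest returns through its own paravirt iret op and never enters the espfix path. A rough sketch of that per-platform dispatch, with hypothetical function names rather than the kernel's real API:

#include <stdio.h>

/* Sketch of the native path: the espfix64 LDT check runs before iretq. */
static void native_iret_sketch(void)
{
	puts("native_iret: espfix64 check, then iretq");
}

/* Sketch of the Xen PV path: return via the hypervisor, espfix64 skipped. */
static void xen_iret_sketch(void)
{
	puts("xen_iret: hypercall-based return, no espfix64");
}

int main(void)
{
	int running_on_xen_pv = 0;		/* assumption: platform selection */
	void (*interrupt_return)(void) =	/* stands in for INTERRUPT_RETURN / the paravirt iret op */
		running_on_xen_pv ? xen_iret_sketch : native_iret_sketch;

	interrupt_return();
	return 0;
}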
+10 −18
@@ -830,27 +830,24 @@ restore_args:
 	RESTORE_ARGS 1,8,1
 
 irq_return:
+	INTERRUPT_RETURN
+
+ENTRY(native_iret)
 	/*
 	 * Are we returning to a stack segment from the LDT?  Note: in
 	 * 64-bit mode SS:RSP on the exception stack is always valid.
 	 */
 #ifdef CONFIG_X86_ESPFIX64
 	testb $4,(SS-RIP)(%rsp)
-	jnz irq_return_ldt
+	jnz native_irq_return_ldt
 #endif
 
-irq_return_iret:
-	INTERRUPT_RETURN
-	_ASM_EXTABLE(irq_return_iret, bad_iret)
-
-#ifdef CONFIG_PARAVIRT
-ENTRY(native_iret)
+native_irq_return_iret:
 	iretq
-	_ASM_EXTABLE(native_iret, bad_iret)
-#endif
+	_ASM_EXTABLE(native_irq_return_iret, bad_iret)
 
 #ifdef CONFIG_X86_ESPFIX64
-irq_return_ldt:
+native_irq_return_ldt:
 	pushq_cfi %rax
 	pushq_cfi %rdi
 	SWAPGS
@@ -872,7 +869,7 @@ irq_return_ldt:
 	SWAPGS
 	movq %rax,%rsp
 	popq_cfi %rax
-	jmp irq_return_iret
+	jmp native_irq_return_iret
 #endif
 
 	.section .fixup,"ax"
@@ -956,13 +953,8 @@ __do_double_fault:
 	cmpl $__KERNEL_CS,CS(%rdi)
 	jne do_double_fault
 	movq RIP(%rdi),%rax
-	cmpq $irq_return_iret,%rax
-#ifdef CONFIG_PARAVIRT
-	je 1f
-	cmpq $native_iret,%rax
-#endif
+	cmpq $native_irq_return_iret,%rax
 	jne do_double_fault		/* This shouldn't happen... */
-1:
 	movq PER_CPU_VAR(kernel_stack),%rax
 	subq $(6*8-KERNEL_STACK_OFFSET),%rax	/* Reset to original stack */
 	movq %rax,RSP(%rdi)
@@ -1428,7 +1420,7 @@ error_sti:
  */
 error_kernelspace:
 	incl %ebx
-	leaq irq_return_iret(%rip),%rcx
+	leaq native_irq_return_iret(%rip),%rcx
 	cmpq %rcx,RIP+8(%rsp)
 	je error_swapgs
 	movl %ecx,%eax	/* zero extend */
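
Both __do_double_fault and error_kernelspace above recognise a fault that happened on the iretq itself by comparing the saved RIP against the iret label; with the separate paravirt-only native_iret copy gone, only the single native_irq_return_iret address has to be checked. A small illustrative sketch of that comparison, with hypothetical names and not kernel code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the address of the native_irq_return_iret asm label. */
static const char native_irq_return_iret_label[1];

static bool faulted_on_iret(uint64_t saved_rip)
{
	/* Before the patch two addresses had to be compared (irq_return_iret and,
	 * under CONFIG_PARAVIRT, native_iret); now a single label is enough. */
	return saved_rip == (uint64_t)(uintptr_t)native_irq_return_iret_label;
}

int main(void)
{
	uint64_t rip = (uint64_t)(uintptr_t)native_irq_return_iret_label;

	printf("fault on iretq? %d\n", faulted_on_iret(rip));
	return 0;
}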
+0 −2
@@ -6,7 +6,6 @@ DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
 DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
-DEF_NATIVE(pv_cpu_ops, iret, "iretq");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
@@ -50,7 +49,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		PATCH_SITE(pv_irq_ops, save_fl);
 		PATCH_SITE(pv_irq_ops, irq_enable);
 		PATCH_SITE(pv_irq_ops, irq_disable);
-		PATCH_SITE(pv_cpu_ops, iret);
 		PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
 		PATCH_SITE(pv_cpu_ops, usergs_sysret32);
 		PATCH_SITE(pv_cpu_ops, usergs_sysret64);
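
The two hunks above drop the native inline patch for the iret operation: the paravirt patcher may no longer replace the iret call site with a literal iretq, because even native kernels must now reach native_iret and its espfix64 check. A rough sketch of the patch-site idea, with hypothetical helper names rather than the kernel's real DEF_NATIVE/PATCH_SITE implementation:

#include <stdio.h>
#include <string.h>

/*
 * Patch a call site with a short native instruction sequence when it fits,
 * otherwise keep the indirect call through the op table.  Returns the number
 * of bytes written.
 */
static size_t patch_site_sketch(unsigned char *site, size_t site_len,
				const unsigned char *native, size_t native_len)
{
	if (native_len > site_len)
		return 0;			/* too long: leave the indirect call in place */
	memcpy(site, native, native_len);	/* inline the native code */
	return native_len;
}

int main(void)
{
	unsigned char call_site[5] = { 0xe8, 0, 0, 0, 0 };	/* pretend relative call */
	const unsigned char native_cli[1] = { 0xfa };		/* "cli", as in DEF_NATIVE above */

	size_t n = patch_site_sketch(call_site, sizeof(call_site),
				     native_cli, sizeof(native_cli));
	printf("patched %zu byte(s)\n", n);
	return 0;
}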