Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6620ef28 authored by Borislav Petkov
Browse files

x86/lib/clear_page_64.S: Convert to ALTERNATIVE_2 macro



Move clear_page() up so that we can get 2-byte forward JMPs when
patching:

  apply_alternatives: feat: 3*32+16, old: (ffffffff8130adb0, len: 5), repl: (ffffffff81d0b859, len: 5)
  ffffffff8130adb0: alt_insn: 90 90 90 90 90
  recompute_jump: new_displ: 0x0000003e
  ffffffff81d0b859: rpl_insn: eb 3e 66 66 90

even though the compiler generated 5-byte JMPs which we padded with 5
NOPs.

Also, make the REP_GOOD version be the default as the majority of
machines set REP_GOOD. This way we get to save ourselves the JMP:

  old insn VA: 0xffffffff813038b0, CPU feat: X86_FEATURE_REP_GOOD, size: 5, padlen: 0
  clear_page:

  ffffffff813038b0 <clear_page>:
  ffffffff813038b0:       e9 0b 00 00 00          jmpq ffffffff813038c0
  repl insn: 0xffffffff81cf0e92, size: 0

  old insn VA: 0xffffffff813038b0, CPU feat: X86_FEATURE_ERMS, size: 5, padlen: 0
  clear_page:

  ffffffff813038b0 <clear_page>:
  ffffffff813038b0:       e9 0b 00 00 00          jmpq ffffffff813038c0
  repl insn: 0xffffffff81cf0e92, size: 5
   ffffffff81cf0e92:      e9 69 2a 61 ff          jmpq ffffffff81303900

  ffffffff813038b0 <clear_page>:
  ffffffff813038b0:       e9 69 2a 61 ff          jmpq ffffffff8091631e

Signed-off-by: Borislav Petkov <bp@suse.de>
parent 8e65f6e0
Loading
Loading
Loading
Loading
+27 −39
Original line number Diff line number Diff line
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>

/*
 * Most CPUs support enhanced REP MOVSB/STOSB instructions. It is
 * recommended to use this when possible and we do use them by default.
 * If enhanced REP MOVSB/STOSB is not available, try to use fast string.
 * Otherwise, use original.
 */

/*
 * Zero a page.
 * rdi	page
 * %rdi	- page
 */
ENTRY(clear_page_c)
ENTRY(clear_page)
	CFI_STARTPROC

	/*
	 * Runtime-patched dispatch (see commit message above): the
	 * REP_GOOD "rep stosq" body below is the default fall-through.
	 * On CPUs without X86_FEATURE_REP_GOOD the original jmp to
	 * clear_page_orig stays in place; on CPUs with X86_FEATURE_ERMS
	 * it is replaced by a jmp to clear_page_c_e.
	 */
	ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \
		      "jmp clear_page_c_e", X86_FEATURE_ERMS

	movl $4096/8,%ecx	/* one 4K page = 512 qword stores */
	xorl %eax,%eax		/* fill value: zero */
	rep stosq		/* zero 8 bytes per iteration at (%rdi) */
	ret
	CFI_ENDPROC
ENDPROC(clear_page_c)
ENDPROC(clear_page)

ENTRY(clear_page_c_e)
ENTRY(clear_page_orig)
	CFI_STARTPROC
	movl $4096,%ecx	/* byte count: one 4K page */
	xorl %eax,%eax	/* fill value: zero */
	rep stosb	/* ERMS variant: byte-granular rep stosb fill */
	ret
	CFI_ENDPROC
ENDPROC(clear_page_c_e)

ENTRY(clear_page)
	CFI_STARTPROC
	xorl   %eax,%eax
	movl   $4096/64,%ecx
	.p2align 4
@@ -45,29 +49,13 @@ ENTRY(clear_page)
	nop
	ret
	CFI_ENDPROC
.Lclear_page_end:
ENDPROC(clear_page)

	/*
	 * Some CPUs support enhanced REP MOVSB/STOSB instructions.
	 * It is recommended to use this when possible.
	 * If enhanced REP MOVSB/STOSB is not available, try to use fast string.
	 * Otherwise, use original function.
	 *
	 */

#include <asm/cpufeature.h>
ENDPROC(clear_page_orig)

	/*
	 * Legacy hand-rolled alternatives (removed by this commit in
	 * favor of ALTERNATIVE_2): two back-to-back 2-byte short JMPs
	 * (opcode 0xeb + rel8) stored in .altinstr_replacement.  The
	 * matching .altinstructions entries tell apply_alternatives()
	 * to patch the first jmp over clear_page on REP_GOOD CPUs and
	 * the second one on ERMS CPUs.  The rel8 displacement is
	 * computed by hand relative to the end of each 2-byte jmp.
	 */
	.section .altinstr_replacement,"ax"
1:	.byte 0xeb					/* jmp <disp8> */
	.byte (clear_page_c - clear_page) - (2f - 1b)	/* offset */
2:	.byte 0xeb					/* jmp <disp8> */
	.byte (clear_page_c_e - clear_page) - (3f - 2b)	/* offset */
3:
	.previous
	.section .altinstructions,"a"
	altinstruction_entry clear_page,1b,X86_FEATURE_REP_GOOD,\
			     .Lclear_page_end-clear_page, 2b-1b, 0
	altinstruction_entry clear_page,2b,X86_FEATURE_ERMS,   \
			     .Lclear_page_end-clear_page,3b-2b, 0
	.previous
ENTRY(clear_page_c_e)
	CFI_STARTPROC
	movl $4096,%ecx	/* byte count: one 4K page */
	xorl %eax,%eax	/* fill value: zero */
	rep stosb	/* ERMS variant: byte-granular rep stosb fill */
	ret
	CFI_ENDPROC
ENDPROC(clear_page_c_e)