Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a7bea830 authored by Jan Beulich, committed by Ingo Molnar
Browse files

x86/asm/64: Use 32-bit XOR to zero registers



Some Intel CPUs don't recognize 64-bit XORs as zeroing idioms. Zeroing
idioms don't require execution bandwidth, as they're being taken care
of in the frontend (through register renaming). Use 32-bit XORs instead.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Cc: Alok Kataria <akataria@vmware.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: davem@davemloft.net
Cc: herbert@gondor.apana.org.au
Cc: pavel@ucw.cz
Cc: rjw@rjwysocki.net
Link: http://lkml.kernel.org/r/5B39FF1A02000078001CFB54@prv1-mh.provo.novell.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c5fcdbf1
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -75,7 +75,7 @@
 *   %r9
 */
__load_partial:
	xor %r9, %r9
	xor %r9d, %r9d
	pxor MSG, MSG

	mov LEN, %r8
+1 −1
Original line number Diff line number Diff line
@@ -66,7 +66,7 @@
 *   %r9
 */
__load_partial:
	xor %r9, %r9
	xor %r9d, %r9d
	pxor MSG0, MSG0
	pxor MSG1, MSG1

+1 −1
Original line number Diff line number Diff line
@@ -59,7 +59,7 @@
 *   %r9
 */
__load_partial:
	xor %r9, %r9
	xor %r9d, %r9d
	pxor MSG, MSG

	mov LEN, %r8
+4 −4
Original line number Diff line number Diff line
@@ -258,7 +258,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
.macro GCM_INIT Iv SUBKEY AAD AADLEN
	mov \AADLEN, %r11
	mov %r11, AadLen(%arg2) # ctx_data.aad_length = aad_length
	xor %r11, %r11
	xor %r11d, %r11d
	mov %r11, InLen(%arg2) # ctx_data.in_length = 0
	mov %r11, PBlockLen(%arg2) # ctx_data.partial_block_length = 0
	mov %r11, PBlockEncKey(%arg2) # ctx_data.partial_block_enc_key = 0
@@ -286,7 +286,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
	movdqu HashKey(%arg2), %xmm13
	add %arg5, InLen(%arg2)

	xor %r11, %r11 # initialise the data pointer offset as zero
	xor %r11d, %r11d # initialise the data pointer offset as zero
	PARTIAL_BLOCK %arg3 %arg4 %arg5 %r11 %xmm8 \operation

	sub %r11, %arg5		# sub partial block data used
@@ -702,7 +702,7 @@ _no_extra_mask_1_\@:

	# GHASH computation for the last <16 Byte block
	GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
	xor	%rax,%rax
	xor	%eax, %eax

	mov	%rax, PBlockLen(%arg2)
	jmp	_dec_done_\@
@@ -737,7 +737,7 @@ _no_extra_mask_2_\@:

	# GHASH computation for the last <16 Byte block
	GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
	xor	%rax,%rax
	xor	%eax, %eax

	mov	%rax, PBlockLen(%arg2)
	jmp	_encode_done_\@
+2 −2
Original line number Diff line number Diff line
@@ -463,7 +463,7 @@ _get_AAD_rest_final\@:

_get_AAD_done\@:
	# initialize the data pointer offset as zero
	xor     %r11, %r11
	xor     %r11d, %r11d

	# start AES for num_initial_blocks blocks
	mov     arg5, %rax                     # rax = *Y0
@@ -1770,7 +1770,7 @@ _get_AAD_rest_final\@:

_get_AAD_done\@:
	# initialize the data pointer offset as zero
	xor     %r11, %r11
	xor     %r11d, %r11d

	# start AES for num_initial_blocks blocks
	mov     arg5, %rax                     # rax = *Y0
Loading