
Commit 65df5774 authored by Mathias Krause, committed by Herbert Xu

crypto: sha1 - use Kbuild supplied flags for AVX test



Commit ea4d26ae ("raid5: add AVX optimized RAID5 checksumming")
introduced x86/ arch wide defines for AFLAGS and CFLAGS indicating AVX
support in binutils based on the same test we have in x86/crypto/ right
now. To minimize duplication, drop our implementation in favour of the
one in x86/.

Signed-off-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 4e3c8a1b
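
The arch-wide test mentioned above is the one commit ea4d26ae added to arch/x86/Makefile: the assembler is probed once with as-instr and, when the probe succeeds, a CONFIG_AS_AVX define is appended to both KBUILD_CFLAGS and KBUILD_AFLAGS, so every C and assembly file in the tree sees the same flag. Roughly, and with the probed instruction and variable name only assumed here rather than quoted from that commit, the mechanism looks like this:

# arch/x86/Makefile (sketch): probe binutils once for AVX support and,
# if the instruction assembles, define CONFIG_AS_AVX arch-wide for both
# C and assembly files. Probe instruction and variable name are assumed.
avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)

KBUILD_AFLAGS += $(avx_instr)
KBUILD_CFLAGS += $(avx_instr)

With that define supplied by Kbuild, x86/crypto no longer needs its private ifeq/as-instr block in its Makefile, and the SHA-1 sources can guard the AVX code path with #ifdef CONFIG_AS_AVX instead of SHA1_ENABLE_AVX_SUPPORT, which is exactly what the hunks below do.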
arch/x86/crypto/Makefile  +0 −7
@@ -34,12 +34,5 @@ salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
 serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o
 
 aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
-
 ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
-
-# enable AVX support only when $(AS) can actually assemble the instructions
-ifeq ($(call as-instr,vpxor %xmm0$(comma)%xmm1$(comma)%xmm2,yes,no),yes)
-AFLAGS_sha1_ssse3_asm.o += -DSHA1_ENABLE_AVX_SUPPORT
-CFLAGS_sha1_ssse3_glue.o += -DSHA1_ENABLE_AVX_SUPPORT
-endif
 sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
arch/x86/crypto/sha1_ssse3_asm.S  +1 −1
@@ -468,7 +468,7 @@ W_PRECALC_SSSE3
  */
 SHA1_VECTOR_ASM     sha1_transform_ssse3
 
-#ifdef SHA1_ENABLE_AVX_SUPPORT
+#ifdef CONFIG_AS_AVX
 
 .macro W_PRECALC_AVX
 
arch/x86/crypto/sha1_ssse3_glue.c  +3 −3
@@ -35,7 +35,7 @@
 
 asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data,
 				     unsigned int rounds);
-#ifdef SHA1_ENABLE_AVX_SUPPORT
+#ifdef CONFIG_AS_AVX
 asmlinkage void sha1_transform_avx(u32 *digest, const char *data,
 				   unsigned int rounds);
 #endif
@@ -184,7 +184,7 @@ static struct shash_alg alg = {
 	}
 };
 
-#ifdef SHA1_ENABLE_AVX_SUPPORT
+#ifdef CONFIG_AS_AVX
 static bool __init avx_usable(void)
 {
 	u64 xcr0;
@@ -209,7 +209,7 @@ static int __init sha1_ssse3_mod_init(void)
 	if (cpu_has_ssse3)
 		sha1_transform_asm = sha1_transform_ssse3;
 
-#ifdef SHA1_ENABLE_AVX_SUPPORT
+#ifdef CONFIG_AS_AVX
 	/* allow AVX to override SSSE3, it's a little faster */
 	if (avx_usable())
 		sha1_transform_asm = sha1_transform_avx;