Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e52b7023 authored by Eric Biggers, committed by Herbert Xu
Browse files

crypto: arm64 - convert to use crypto_simd_usable()



Replace all calls to may_use_simd() in the arm64 crypto code with
crypto_simd_usable(), in order to allow testing the no-SIMD code paths.

Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 99680c5e
Loading
Loading
Loading
Loading
+4 −3
Original line number Original line Diff line number Diff line
@@ -14,6 +14,7 @@
#include <crypto/aes.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>
#include <linux/module.h>


@@ -109,7 +110,7 @@ static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
			   u32 abytes, u32 *macp)
			   u32 abytes, u32 *macp)
{
{
	if (may_use_simd()) {
	if (crypto_simd_usable()) {
		kernel_neon_begin();
		kernel_neon_begin();
		ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
		ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
				     num_rounds(key));
				     num_rounds(key));
@@ -255,7 +256,7 @@ static int ccm_encrypt(struct aead_request *req)


	err = skcipher_walk_aead_encrypt(&walk, req, false);
	err = skcipher_walk_aead_encrypt(&walk, req, false);


	if (may_use_simd()) {
	if (crypto_simd_usable()) {
		while (walk.nbytes) {
		while (walk.nbytes) {
			u32 tail = walk.nbytes % AES_BLOCK_SIZE;
			u32 tail = walk.nbytes % AES_BLOCK_SIZE;


@@ -313,7 +314,7 @@ static int ccm_decrypt(struct aead_request *req)


	err = skcipher_walk_aead_decrypt(&walk, req, false);
	err = skcipher_walk_aead_decrypt(&walk, req, false);


	if (may_use_simd()) {
	if (crypto_simd_usable()) {
		while (walk.nbytes) {
		while (walk.nbytes) {
			u32 tail = walk.nbytes % AES_BLOCK_SIZE;
			u32 tail = walk.nbytes % AES_BLOCK_SIZE;


+3 −2
Original line number Original line Diff line number Diff line
@@ -12,6 +12,7 @@
#include <asm/simd.h>
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <crypto/aes.h>
#include <crypto/internal/simd.h>
#include <linux/cpufeature.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/module.h>
@@ -52,7 +53,7 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
{
{
	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);


	if (!may_use_simd()) {
	if (!crypto_simd_usable()) {
		__aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
		__aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
		return;
		return;
	}
	}
@@ -66,7 +67,7 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
{
{
	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);


	if (!may_use_simd()) {
	if (!crypto_simd_usable()) {
		__aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
		__aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
		return;
		return;
	}
	}
+2 −2
Original line number Original line Diff line number Diff line
@@ -405,7 +405,7 @@ static int ctr_encrypt_sync(struct skcipher_request *req)
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);


	if (!may_use_simd())
	if (!crypto_simd_usable())
		return aes_ctr_encrypt_fallback(ctx, req);
		return aes_ctr_encrypt_fallback(ctx, req);


	return ctr_encrypt(req);
	return ctr_encrypt(req);
@@ -642,7 +642,7 @@ static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
{
{
	int rounds = 6 + ctx->key_length / 4;
	int rounds = 6 + ctx->key_length / 4;


	if (may_use_simd()) {
	if (crypto_simd_usable()) {
		kernel_neon_begin();
		kernel_neon_begin();
		aes_mac_update(in, ctx->key_enc, rounds, blocks, dg, enc_before,
		aes_mac_update(in, ctx->key_enc, rounds, blocks, dg, enc_before,
			       enc_after);
			       enc_after);
+1 −1
Original line number Original line Diff line number Diff line
@@ -288,7 +288,7 @@ static int ctr_encrypt_sync(struct skcipher_request *req)
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);


	if (!may_use_simd())
	if (!crypto_simd_usable())
		return aes_ctr_encrypt_fallback(&ctx->fallback, req);
		return aes_ctr_encrypt_fallback(&ctx->fallback, req);


	return ctr_encrypt(req);
	return ctr_encrypt(req);
+3 −2
Original line number Original line Diff line number Diff line
@@ -21,6 +21,7 @@


#include <crypto/algapi.h>
#include <crypto/algapi.h>
#include <crypto/chacha.h>
#include <crypto/chacha.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/module.h>
@@ -90,7 +91,7 @@ static int chacha_neon(struct skcipher_request *req)
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);


	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
		return crypto_chacha_crypt(req);
		return crypto_chacha_crypt(req);


	return chacha_neon_stream_xor(req, ctx, req->iv);
	return chacha_neon_stream_xor(req, ctx, req->iv);
@@ -104,7 +105,7 @@ static int xchacha_neon(struct skcipher_request *req)
	u32 state[16];
	u32 state[16];
	u8 real_iv[16];
	u8 real_iv[16];


	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
		return crypto_xchacha_crypt(req);
		return crypto_xchacha_crypt(req);


	crypto_chacha_init(state, ctx, req->iv);
	crypto_chacha_init(state, ctx, req->iv);
Loading