
Commit b59e2ae3 authored by Ard Biesheuvel, committed by Herbert Xu

crypto: arm/sha256 - move SHA-224/256 ASM/NEON implementation to base layer



This removes all the boilerplate from the existing implementation,
and replaces it with calls into the base layer.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent dde00981
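
For context: the base layer in <crypto/sha256_base.h> factors the shared buffering, padding and finalisation logic into inline helpers that are parameterised on a block transform. A minimal sketch of the glue pattern this commit moves to, with my_sha256_block() as a hypothetical stand-in for an architecture's block function (not part of this commit):

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>

/* hypothetical block transform with the sha256_block_fn prototype */
static void my_sha256_block(struct sha256_state *sst, u8 const *src,
			    int blocks);

static int my_sha256_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	/* the base layer buffers partial blocks and invokes the transform */
	return sha256_base_do_update(desc, data, len, my_sha256_block);
}

static int my_sha256_final(struct shash_desc *desc, u8 *out)
{
	/* the base layer appends the padding and the 64-bit length */
	sha256_base_do_finalize(desc, my_sha256_block);
	/* copies out the digest and wipes the state */
	return sha256_base_finish(desc, out);
}

Together with sha256_base_init()/sha224_base_init() for the .init hooks, this is essentially all a glue driver still has to provide.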
arch/arm/crypto/sha256_glue.c  +26 −144
@@ -24,165 +24,49 @@
 #include <linux/types.h>
 #include <linux/string.h>
 #include <crypto/sha.h>
-#include <asm/byteorder.h>
+#include <crypto/sha256_base.h>
 #include <asm/simd.h>
 #include <asm/neon.h>
 
 #include "sha256_glue.h"
 
 asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
 					unsigned int num_blks);
 
-
-int sha256_init(struct shash_desc *desc)
-{
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-
-	sctx->state[0] = SHA256_H0;
-	sctx->state[1] = SHA256_H1;
-	sctx->state[2] = SHA256_H2;
-	sctx->state[3] = SHA256_H3;
-	sctx->state[4] = SHA256_H4;
-	sctx->state[5] = SHA256_H5;
-	sctx->state[6] = SHA256_H6;
-	sctx->state[7] = SHA256_H7;
-	sctx->count = 0;
-
-	return 0;
-}
-
-int sha224_init(struct shash_desc *desc)
-{
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-
-	sctx->state[0] = SHA224_H0;
-	sctx->state[1] = SHA224_H1;
-	sctx->state[2] = SHA224_H2;
-	sctx->state[3] = SHA224_H3;
-	sctx->state[4] = SHA224_H4;
-	sctx->state[5] = SHA224_H5;
-	sctx->state[6] = SHA224_H6;
-	sctx->state[7] = SHA224_H7;
-	sctx->count = 0;
-
-	return 0;
-}
-
-int __sha256_update(struct shash_desc *desc, const u8 *data, unsigned int len,
-		    unsigned int partial)
-{
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-	unsigned int done = 0;
-
-	sctx->count += len;
-
-	if (partial) {
-		done = SHA256_BLOCK_SIZE - partial;
-		memcpy(sctx->buf + partial, data, done);
-		sha256_block_data_order(sctx->state, sctx->buf, 1);
-	}
-
-	if (len - done >= SHA256_BLOCK_SIZE) {
-		const unsigned int rounds = (len - done) / SHA256_BLOCK_SIZE;
-
-		sha256_block_data_order(sctx->state, data + done, rounds);
-		done += rounds * SHA256_BLOCK_SIZE;
-	}
-
-	memcpy(sctx->buf, data + done, len - done);
-
-	return 0;
-}
-
-int sha256_update(struct shash_desc *desc, const u8 *data, unsigned int len)
+int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
+			     unsigned int len)
 {
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-	unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
-
-	/* Handle the fast case right here */
-	if (partial + len < SHA256_BLOCK_SIZE) {
-		sctx->count += len;
-		memcpy(sctx->buf + partial, data, len);
+	/* make sure casting to sha256_block_fn() is safe */
+	BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
 
-		return 0;
-	}
-
-	return __sha256_update(desc, data, len, partial);
+	return sha256_base_do_update(desc, data, len,
+				(sha256_block_fn *)sha256_block_data_order);
 }
+EXPORT_SYMBOL(crypto_sha256_arm_update);
 
-/* Add padding and return the message digest. */
 static int sha256_final(struct shash_desc *desc, u8 *out)
 {
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-	unsigned int i, index, padlen;
-	__be32 *dst = (__be32 *)out;
-	__be64 bits;
-	static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };
-
-	/* save number of bits */
-	bits = cpu_to_be64(sctx->count << 3);
-
-	/* Pad out to 56 mod 64 and append length */
-	index = sctx->count % SHA256_BLOCK_SIZE;
-	padlen = (index < 56) ? (56 - index) : ((SHA256_BLOCK_SIZE+56)-index);
-
-	/* We need to fill a whole block for __sha256_update */
-	if (padlen <= 56) {
-		sctx->count += padlen;
-		memcpy(sctx->buf + index, padding, padlen);
-	} else {
-		__sha256_update(desc, padding, padlen, index);
-	}
-	__sha256_update(desc, (const u8 *)&bits, sizeof(bits), 56);
-
-	/* Store state in digest */
-	for (i = 0; i < 8; i++)
-		dst[i] = cpu_to_be32(sctx->state[i]);
-
-	/* Wipe context */
-	memset(sctx, 0, sizeof(*sctx));
-
-	return 0;
-}
-
-static int sha224_final(struct shash_desc *desc, u8 *out)
-{
-	u8 D[SHA256_DIGEST_SIZE];
-
-	sha256_final(desc, D);
-
-	memcpy(out, D, SHA224_DIGEST_SIZE);
-	memzero_explicit(D, SHA256_DIGEST_SIZE);
-
-	return 0;
-}
-
-int sha256_export(struct shash_desc *desc, void *out)
-{
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-
-	memcpy(out, sctx, sizeof(*sctx));
-
-	return 0;
+	sha256_base_do_finalize(desc,
+				(sha256_block_fn *)sha256_block_data_order);
+	return sha256_base_finish(desc, out);
 }
 
-int sha256_import(struct shash_desc *desc, const void *in)
+int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data,
+			    unsigned int len, u8 *out)
 {
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-
-	memcpy(sctx, in, sizeof(*sctx));
-
-	return 0;
+	sha256_base_do_update(desc, data, len,
+			      (sha256_block_fn *)sha256_block_data_order);
+	return sha256_final(desc, out);
 }
+EXPORT_SYMBOL(crypto_sha256_arm_finup);
 
 static struct shash_alg algs[] = { {
 	.digestsize	=	SHA256_DIGEST_SIZE,
-	.init		=	sha256_init,
-	.update		=	sha256_update,
+	.init		=	sha256_base_init,
+	.update		=	crypto_sha256_arm_update,
 	.final		=	sha256_final,
-	.export		=	sha256_export,
-	.import		=	sha256_import,
+	.finup		=	crypto_sha256_arm_finup,
 	.descsize	=	sizeof(struct sha256_state),
-	.statesize	=	sizeof(struct sha256_state),
 	.base		=	{
 		.cra_name	=	"sha256",
 		.cra_driver_name =	"sha256-asm",
@@ -193,13 +77,11 @@ static struct shash_alg algs[] = { {
 	}
 }, {
 	.digestsize	=	SHA224_DIGEST_SIZE,
-	.init		=	sha224_init,
-	.update		=	sha256_update,
-	.final		=	sha224_final,
-	.export		=	sha256_export,
-	.import		=	sha256_import,
+	.init		=	sha224_base_init,
+	.update		=	crypto_sha256_arm_update,
+	.final		=	sha256_final,
+	.finup		=	crypto_sha256_arm_finup,
 	.descsize	=	sizeof(struct sha256_state),
-	.statesize	=	sizeof(struct sha256_state),
 	.base		=	{
 		.cra_name	=	"sha224",
 		.cra_driver_name =	"sha224-asm",
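
The BUILD_BUG_ON added above is what makes the (sha256_block_fn *) casts legitimate: the assembly routine takes a u32 *digest while the base layer hands it a struct sha256_state *, and the two only alias as long as state stays the first member. In outline (the typedef comes from <crypto/sha256_base.h>):

/* prototype of the assembly routine */
asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
					unsigned int num_blks);

/* prototype the base layer expects */
typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src,
			       int blocks);

/* holds only while state remains at offset 0 of struct sha256_state */
BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);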
arch/arm/crypto/sha256_glue.h  +4 −13
@@ -2,22 +2,13 @@
 #define _CRYPTO_SHA256_GLUE_H
 
 #include <linux/crypto.h>
 #include <crypto/sha.h>
 
 extern struct shash_alg sha256_neon_algs[2];
 
-extern int sha256_init(struct shash_desc *desc);
-
-extern int sha224_init(struct shash_desc *desc);
-
-extern int __sha256_update(struct shash_desc *desc, const u8 *data,
-			   unsigned int len, unsigned int partial);
-
-extern int sha256_update(struct shash_desc *desc, const u8 *data,
-			 unsigned int len);
+int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
+			     unsigned int len);
 
-extern int sha256_export(struct shash_desc *desc, void *out);
-
-extern int sha256_import(struct shash_desc *desc, const void *in);
+int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data,
+			    unsigned int len, u8 *hash);
 
 #endif /* _CRYPTO_SHA256_GLUE_H */
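
Only the NEON glue calls these exports directly; everyone else reaches the implementations through the generic shash API, which picks the registration with the highest cra_priority (so "sha256-neon" wins over "sha256-asm" where NEON is usable). A hedged sketch of typical in-kernel usage, with demo_sha256() as an illustrative name only:

#include <crypto/hash.h>
#include <linux/err.h>

static int demo_sha256(const u8 *buf, unsigned int len, u8 *digest)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);	/* best "sha256" driver */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;	/* transform may not sleep */
		err = crypto_shash_digest(desc, buf, len, digest);
	}

	crypto_free_shash(tfm);
	return err;
}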
arch/arm/crypto/sha256_neon_glue.c  +36 −107
@@ -19,131 +19,62 @@
 #include <linux/types.h>
 #include <linux/string.h>
 #include <crypto/sha.h>
+#include <crypto/sha256_base.h>
 #include <asm/byteorder.h>
 #include <asm/simd.h>
 #include <asm/neon.h>
 
 #include "sha256_glue.h"
 
 asmlinkage void sha256_block_data_order_neon(u32 *digest, const void *data,
 					     unsigned int num_blks);
 
-
-static int __sha256_neon_update(struct shash_desc *desc, const u8 *data,
-				unsigned int len, unsigned int partial)
-{
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-	unsigned int done = 0;
-
-	sctx->count += len;
-
-	if (partial) {
-		done = SHA256_BLOCK_SIZE - partial;
-		memcpy(sctx->buf + partial, data, done);
-		sha256_block_data_order_neon(sctx->state, sctx->buf, 1);
-	}
-
-	if (len - done >= SHA256_BLOCK_SIZE) {
-		const unsigned int rounds = (len - done) / SHA256_BLOCK_SIZE;
-
-		sha256_block_data_order_neon(sctx->state, data + done, rounds);
-		done += rounds * SHA256_BLOCK_SIZE;
-	}
-
-	memcpy(sctx->buf, data + done, len - done);
-
-	return 0;
-}
-
-static int sha256_neon_update(struct shash_desc *desc, const u8 *data,
-			      unsigned int len)
+static int sha256_update(struct shash_desc *desc, const u8 *data,
+			 unsigned int len)
 {
 	struct sha256_state *sctx = shash_desc_ctx(desc);
-	unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
-	int res;
 
-	/* Handle the fast case right here */
-	if (partial + len < SHA256_BLOCK_SIZE) {
-		sctx->count += len;
-		memcpy(sctx->buf + partial, data, len);
-
-		return 0;
-	}
-
-	if (!may_use_simd()) {
-		res = __sha256_update(desc, data, len, partial);
-	} else {
-		kernel_neon_begin();
-		res = __sha256_neon_update(desc, data, len, partial);
-		kernel_neon_end();
-	}
+	if (!may_use_simd() ||
+	    (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
+		return crypto_sha256_arm_update(desc, data, len);
 
-	return res;
+	kernel_neon_begin();
+	sha256_base_do_update(desc, data, len,
+			(sha256_block_fn *)sha256_block_data_order_neon);
+	kernel_neon_end();
+
+	return 0;
 }
 
-/* Add padding and return the message digest. */
-static int sha256_neon_final(struct shash_desc *desc, u8 *out)
+static int sha256_finup(struct shash_desc *desc, const u8 *data,
+			unsigned int len, u8 *out)
 {
-	struct sha256_state *sctx = shash_desc_ctx(desc);
-	unsigned int i, index, padlen;
-	__be32 *dst = (__be32 *)out;
-	__be64 bits;
-	static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };
-
-	/* save number of bits */
-	bits = cpu_to_be64(sctx->count << 3);
+	if (!may_use_simd())
+		return crypto_sha256_arm_finup(desc, data, len, out);
 
-	/* Pad out to 56 mod 64 and append length */
-	index = sctx->count % SHA256_BLOCK_SIZE;
-	padlen = (index < 56) ? (56 - index) : ((SHA256_BLOCK_SIZE+56)-index);
-
-	if (!may_use_simd()) {
-		sha256_update(desc, padding, padlen);
-		sha256_update(desc, (const u8 *)&bits, sizeof(bits));
-	} else {
-		kernel_neon_begin();
-		/* We need to fill a whole block for __sha256_neon_update() */
-		if (padlen <= 56) {
-			sctx->count += padlen;
-			memcpy(sctx->buf + index, padding, padlen);
-		} else {
-			__sha256_neon_update(desc, padding, padlen, index);
-		}
-		__sha256_neon_update(desc, (const u8 *)&bits,
-					sizeof(bits), 56);
-		kernel_neon_end();
-	}
-
-	/* Store state in digest */
-	for (i = 0; i < 8; i++)
-		dst[i] = cpu_to_be32(sctx->state[i]);
-
-	/* Wipe context */
-	memzero_explicit(sctx, sizeof(*sctx));
-
-	return 0;
+	kernel_neon_begin();
+	if (len)
+		sha256_base_do_update(desc, data, len,
+			(sha256_block_fn *)sha256_block_data_order_neon);
+	sha256_base_do_finalize(desc,
+			(sha256_block_fn *)sha256_block_data_order_neon);
+	kernel_neon_end();
+
+	return sha256_base_finish(desc, out);
 }
 
-static int sha224_neon_final(struct shash_desc *desc, u8 *out)
+static int sha256_final(struct shash_desc *desc, u8 *out)
 {
-	u8 D[SHA256_DIGEST_SIZE];
-
-	sha256_neon_final(desc, D);
-
-	memcpy(out, D, SHA224_DIGEST_SIZE);
-	memzero_explicit(D, SHA256_DIGEST_SIZE);
-
-	return 0;
+	return sha256_finup(desc, NULL, 0, out);
 }
 
 struct shash_alg sha256_neon_algs[] = { {
 	.digestsize	=	SHA256_DIGEST_SIZE,
-	.init		=	sha256_init,
-	.update		=	sha256_neon_update,
-	.final		=	sha256_neon_final,
-	.export		=	sha256_export,
-	.import		=	sha256_import,
+	.init		=	sha256_base_init,
+	.update		=	sha256_update,
+	.final		=	sha256_final,
+	.finup		=	sha256_finup,
 	.descsize	=	sizeof(struct sha256_state),
-	.statesize	=	sizeof(struct sha256_state),
 	.base		=	{
 		.cra_name	=	"sha256",
 		.cra_driver_name =	"sha256-neon",
@@ -154,13 +85,11 @@ struct shash_alg sha256_neon_algs[] = { {
 	}
 }, {
 	.digestsize	=	SHA224_DIGEST_SIZE,
-	.init		=	sha224_init,
-	.update		=	sha256_neon_update,
-	.final		=	sha224_neon_final,
-	.export		=	sha256_export,
-	.import		=	sha256_import,
+	.init		=	sha224_base_init,
+	.update		=	sha256_update,
+	.final		=	sha256_final,
+	.finup		=	sha256_finup,
 	.descsize	=	sizeof(struct sha256_state),
-	.statesize	=	sizeof(struct sha256_state),
 	.base		=	{
 		.cra_name	=	"sha224",
 		.cra_driver_name =	"sha224-neon",
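
The pattern worth noting in the NEON glue: every call into sha256_block_data_order_neon() is bracketed by kernel_neon_begin()/kernel_neon_end(), and when may_use_simd() says NEON is off-limits (for example in hard interrupt context) the code falls back to the scalar routines exported by the ASM glue. The final hook collapses into finup with a zero-length update, leaving a single NEON code path. A generic sketch of that dispatch shape, with my_neon_transform() and my_scalar_fallback() as hypothetical stand-ins:

#include <crypto/internal/hash.h>
#include <asm/neon.h>
#include <asm/simd.h>

/* hypothetical scalar fallback, safe in any context */
static int my_scalar_fallback(struct shash_desc *desc, const u8 *data,
			      unsigned int len);
/* hypothetical NEON routine, only valid inside a kernel-mode NEON section */
static void my_neon_transform(struct shash_desc *desc, const u8 *data,
			      unsigned int len);

static int my_update(struct shash_desc *desc, const u8 *data,
		     unsigned int len)
{
	if (!may_use_simd())		/* e.g. called from hard IRQ */
		return my_scalar_fallback(desc, data, len);

	kernel_neon_begin();		/* take ownership of the NEON regs */
	my_neon_transform(desc, data, len);
	kernel_neon_end();		/* hand them back */

	return 0;
}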