Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1ad936e8 authored by Kent Yoder, committed by Benjamin Herrenschmidt
Browse files

drivers/crypto/nx: Fixes for multiple races and issues



Fixes a race on driver init with registering algorithms where the
driver status flag wasn't being set before self testing started.

  Added the cra_alignmask field for CBC and ECB modes.

  Fixed a bug in GCM where AES block size was being used instead of
authsize.

  Removed use of blkcipher_walk routines for scatterlist processing.
Corner cases in the code prevent us from processing an entire
scatterlist at a time and walking the buffers in block sized chunks
turns out to be unnecessary anyway.

  Fixed off-by-one error in saving off extra data in the sha code.

  Fixed accounting error for number of bytes processed in the sha code.

Signed-off-by: Kent Yoder <key@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 519fe2ec
Loading
Loading
Loading
Loading
+1 −0
Original line number Original line Diff line number Diff line
@@ -126,6 +126,7 @@ struct crypto_alg nx_cbc_aes_alg = {
	.cra_blocksize   = AES_BLOCK_SIZE,
	.cra_blocksize   = AES_BLOCK_SIZE,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_blkcipher_type,
	.cra_type        = &crypto_blkcipher_type,
	.cra_alignmask   = 0xf,
	.cra_module      = THIS_MODULE,
	.cra_module      = THIS_MODULE,
	.cra_init        = nx_crypto_ctx_aes_cbc_init,
	.cra_init        = nx_crypto_ctx_aes_cbc_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_exit        = nx_crypto_ctx_exit,
+1 −0
Original line number Original line Diff line number Diff line
@@ -123,6 +123,7 @@ struct crypto_alg nx_ecb_aes_alg = {
	.cra_priority    = 300,
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_flags       = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize   = AES_BLOCK_SIZE,
	.cra_blocksize   = AES_BLOCK_SIZE,
	.cra_alignmask   = 0xf,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_blkcipher_type,
	.cra_type        = &crypto_blkcipher_type,
	.cra_module      = THIS_MODULE,
	.cra_module      = THIS_MODULE,
+1 −1
Original line number Original line Diff line number Diff line
@@ -219,7 +219,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
	if (enc)
	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
	else
		nbytes -= AES_BLOCK_SIZE;
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));


	csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
	csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;


+5 −3
Original line number Original line Diff line number Diff line
@@ -69,7 +69,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
	 *  1: <= SHA256_BLOCK_SIZE: copy into state, return 0
	 *  1: <= SHA256_BLOCK_SIZE: copy into state, return 0
	 *  2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover
	 *  2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover
	 */
	 */
	if (len + sctx->count <= SHA256_BLOCK_SIZE) {
	if (len + sctx->count < SHA256_BLOCK_SIZE) {
		memcpy(sctx->buf + sctx->count, data, len);
		memcpy(sctx->buf + sctx->count, data, len);
		sctx->count += len;
		sctx->count += len;
		goto out;
		goto out;
@@ -110,6 +110,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
	atomic_inc(&(nx_ctx->stats->sha256_ops));
	atomic_inc(&(nx_ctx->stats->sha256_ops));


	/* copy the leftover back into the state struct */
	/* copy the leftover back into the state struct */
	if (leftover)
		memcpy(sctx->buf, data + len - leftover, leftover);
		memcpy(sctx->buf, data + len - leftover, leftover);
	sctx->count = leftover;
	sctx->count = leftover;


@@ -130,6 +131,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
	struct nx_sg *in_sg, *out_sg;
	struct nx_sg *in_sg, *out_sg;
	int rc;
	int rc;



	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
		/* we've hit the nx chip previously, now we're finalizing,
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		 * so copy over the partial digest */
@@ -162,7 +164,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)


	atomic_inc(&(nx_ctx->stats->sha256_ops));
	atomic_inc(&(nx_ctx->stats->sha256_ops));


	atomic64_add(csbcpb->cpb.sha256.message_bit_length,
	atomic64_add(csbcpb->cpb.sha256.message_bit_length / 8,
		     &(nx_ctx->stats->sha256_bytes));
		     &(nx_ctx->stats->sha256_bytes));
	memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
	memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
out:
+4 −3
Original line number Original line Diff line number Diff line
@@ -69,7 +69,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
	 *  1: <= SHA512_BLOCK_SIZE: copy into state, return 0
	 *  1: <= SHA512_BLOCK_SIZE: copy into state, return 0
	 *  2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover
	 *  2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover
	 */
	 */
	if ((u64)len + sctx->count[0] <= SHA512_BLOCK_SIZE) {
	if ((u64)len + sctx->count[0] < SHA512_BLOCK_SIZE) {
		memcpy(sctx->buf + sctx->count[0], data, len);
		memcpy(sctx->buf + sctx->count[0], data, len);
		sctx->count[0] += len;
		sctx->count[0] += len;
		goto out;
		goto out;
@@ -110,6 +110,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
	atomic_inc(&(nx_ctx->stats->sha512_ops));
	atomic_inc(&(nx_ctx->stats->sha512_ops));


	/* copy the leftover back into the state struct */
	/* copy the leftover back into the state struct */
	if (leftover)
		memcpy(sctx->buf, data + len - leftover, leftover);
		memcpy(sctx->buf, data + len - leftover, leftover);
	sctx->count[0] = leftover;
	sctx->count[0] = leftover;


@@ -168,7 +169,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
		goto out;
		goto out;


	atomic_inc(&(nx_ctx->stats->sha512_ops));
	atomic_inc(&(nx_ctx->stats->sha512_ops));
	atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo,
	atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo / 8,
		     &(nx_ctx->stats->sha512_bytes));
		     &(nx_ctx->stats->sha512_bytes));


	memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
	memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
Loading