Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c849163b authored by Marcelo Cerri's avatar Marcelo Cerri Committed by Herbert Xu
Browse files

crypto: nx - fix concurrency issue



The NX driver uses the transformation context to store several fields
containing data related to the state of the operations in progress.
Since a single tfm can be used by different kernel threads at the same
time, we need to protect the data stored in the context.

This patch makes use of spin locks to protect the data where a race
condition can happen.

Reviewed-by: Fionnuala Gunter <fin@linux.vnet.ibm.com>
Reviewed-by: Joy Latten <jmlatten@linux.vnet.ibm.com>
Signed-off-by: Marcelo Cerri <mhcerri@linux.vnet.ibm.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent f22d0811
Loading
Loading
Loading
Loading
+8 −2
Original line number Diff line number Diff line
@@ -70,10 +70,15 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
{
	struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned long irq_flags;
	int rc;

	if (nbytes > nx_ctx->ap->databytelen)
		return -EINVAL;
	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	if (nbytes > nx_ctx->ap->databytelen) {
		rc = -EINVAL;
		goto out;
	}

	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
@@ -100,6 +105,7 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
	atomic64_add(csbcpb->csb.processed_byte_count,
		     &(nx_ctx->stats->aes_bytes));
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

+16 −4
Original line number Diff line number Diff line
@@ -271,10 +271,15 @@ static int ccm_nx_decrypt(struct aead_request *req,
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
	unsigned long irq_flags;
	int rc = -1;

	if (nbytes > nx_ctx->ap->databytelen)
		return -EINVAL;
	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	if (nbytes > nx_ctx->ap->databytelen) {
		rc = -EINVAL;
		goto out;
	}

	nbytes -= authsize;

@@ -308,6 +313,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
	rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
		    authsize) ? -EBADMSG : 0;
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

@@ -318,10 +324,15 @@ static int ccm_nx_encrypt(struct aead_request *req,
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	unsigned long irq_flags;
	int rc = -1;

	if (nbytes > nx_ctx->ap->databytelen)
		return -EINVAL;
	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	if (nbytes > nx_ctx->ap->databytelen) {
		rc = -EINVAL;
		goto out;
	}

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
@@ -350,6 +361,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
				 req->dst, nbytes, authsize,
				 SCATTERWALK_TO_SG);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

+8 −2
Original line number Diff line number Diff line
@@ -88,10 +88,15 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
{
	struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned long irq_flags;
	int rc;

	if (nbytes > nx_ctx->ap->databytelen)
		return -EINVAL;
	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	if (nbytes > nx_ctx->ap->databytelen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes,
			       csbcpb->cpb.aes_ctr.iv);
@@ -112,6 +117,7 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
	atomic64_add(csbcpb->csb.processed_byte_count,
		     &(nx_ctx->stats->aes_bytes));
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

+8 −2
Original line number Diff line number Diff line
@@ -70,10 +70,15 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
{
	struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned long irq_flags;
	int rc;

	if (nbytes > nx_ctx->ap->databytelen)
		return -EINVAL;
	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	if (nbytes > nx_ctx->ap->databytelen) {
		rc = -EINVAL;
		goto out;
	}

	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
@@ -98,6 +103,7 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
	atomic64_add(csbcpb->csb.processed_byte_count,
		     &(nx_ctx->stats->aes_bytes));
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

+4 −0
Original line number Diff line number Diff line
@@ -166,8 +166,11 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	unsigned long irq_flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	if (nbytes > nx_ctx->ap->databytelen)
		goto out;

@@ -255,6 +258,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
		     -EBADMSG : 0;
	}
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

Loading