
Commit c957f8b3 authored by Antoine Tenart, committed by Herbert Xu

crypto: inside-secure - avoid unmapping DMA memory that was not mapped



This patch adds a field to the SafeXcel ahash request structure to
keep track of the number of SG entries that were mapped. This lets the
driver skip dma_unmap_sg() when dma_map_sg() was never called in the
first place. It also removes a warning reported when DMA-API debugging
is enabled in the kernel configuration: "DMA-API: device driver tries
to free DMA memory it has not allocated".

Cc: stable@vger.kernel.org
Fixes: 1b44c5a6 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 0b5a7f71
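
For context, here is a minimal sketch of the bookkeeping pattern this patch introduces. The struct and function names (my_req, my_map, my_unmap) are illustrative only, not symbols from this driver: the idea is to cache the count returned by dma_map_sg() in the request state and to issue dma_unmap_sg() only when that count is non-zero, so a mapping that never happened is never unmapped.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

struct my_req {
	int nents;	/* SG entries currently mapped; 0 means nothing to unmap */
};

static int my_map(struct device *dev, struct my_req *req,
		  struct scatterlist *src, unsigned int nbytes)
{
	/* Remember how many entries dma_map_sg() actually mapped. */
	req->nents = dma_map_sg(dev, src, sg_nents_for_len(src, nbytes),
				DMA_TO_DEVICE);
	if (!req->nents)
		return -ENOMEM;	/* mapping failed, nents stays 0 */

	return 0;
}

static void my_unmap(struct device *dev, struct my_req *req,
		     struct scatterlist *src)
{
	/* Only unmap what was actually mapped, then reset the counter. */
	if (req->nents) {
		dma_unmap_sg(dev, src, req->nents, DMA_TO_DEVICE);
		req->nents = 0;
	}
}

With this bookkeeping, calling my_unmap() on a request whose buffers were never mapped is a no-op, which is what silences the DMA-API debug warning quoted in the commit message.
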
drivers/crypto/inside-secure/safexcel_hash.c  +12 −8
@@ -33,6 +33,8 @@ struct safexcel_ahash_req {
 	bool hmac;
 	bool needs_inv;

+	int nents;
+
 	u8 state_sz;    /* expected sate size, only set once */
 	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));

@@ -151,8 +153,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 		memcpy(areq->result, sreq->state,
 		       crypto_ahash_digestsize(ahash));

-	dma_unmap_sg(priv->dev, areq->src,
-		     sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
+	if (sreq->nents) {
+		dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
+		sreq->nents = 0;
+	}

 	safexcel_free_context(priv, async, sreq->state_sz);

@@ -177,7 +181,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 	struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
 	struct safexcel_result_desc *rdesc;
 	struct scatterlist *sg;
-	int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+	int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;

 	queued = len = req->len - req->processed;
 	if (queued < crypto_ahash_blocksize(ahash))
@@ -233,15 +237,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 	}

 	/* Now handle the current ahash request buffer(s) */
-	nents = dma_map_sg(priv->dev, areq->src,
+	req->nents = dma_map_sg(priv->dev, areq->src,
 				sg_nents_for_len(areq->src, areq->nbytes),
 				DMA_TO_DEVICE);
-	if (!nents) {
+	if (!req->nents) {
 		ret = -ENOMEM;
 		goto cdesc_rollback;
 	}

-	for_each_sg(areq->src, sg, nents, i) {
+	for_each_sg(areq->src, sg, req->nents, i) {
 		int sglen = sg_dma_len(sg);

 		/* Do not overflow the request */