
Commit 059d73ee authored by Horia Geantă, committed by Herbert Xu

crypto: caam - use len instead of nents for building HW S/G table

Currently, conversion of a SW S/G table into the HW S/G layout relies on
the nents returned by sg_nents_for_len(sg, len).
However, this leaves the possibility of the HW S/G table referencing more
data than needed: since the buffer length in each HW S/G entry is filled
in using sg_dma_len(sg), the last entry in the HW S/G table may end up
with a length that is bigger than needed for the crypto request.

This way of converting the S/G table is fine, unless entries have to be
appended to the HW S/G table after the conversion. In that case, the
crypto engine would access data from the S/G entry carrying the too-large
length instead of advancing to the next entry in the S/G table.
No such case exists today, but the upcoming implementation of IV update
for skcipher algorithms needs to add an S/G entry after the req->dst S/G
(corresponding to the output IV).
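
To make the failure mode concrete, here is a standalone C sketch; it is
not the driver code (sw_sg, hw_sg, and both conversion helpers are
simplified, hypothetical stand-ins for the kernel's scatterlist and
sec4/qm S/G types), and it just contrasts nents-based conversion with
the len-based conversion this patch switches to:

/* Standalone sketch, NOT kernel code: sw_sg/hw_sg and both helpers are
 * simplified stand-ins for illustration only. */
#include <stdio.h>
#include <stdint.h>

struct sw_sg { uint32_t dma_len; };          /* stands in for sg_dma_len(sg) */
struct hw_sg { uint32_t len; int final; };   /* stands in for a HW S/G entry */

/* Old scheme: walk 'nents' entries and copy each entry's full DMA length.
 * The last HW entry may therefore claim more bytes than the request needs. */
static void to_hw_sg_by_nents(const struct sw_sg *sg, int nents,
			      struct hw_sg *out)
{
	for (int i = 0; i < nents; i++) {
		out[i].len = sg[i].dma_len;
		out[i].final = (i == nents - 1);
	}
}

/* New scheme: walk the table by remaining request length and trim the
 * final HW entry, so exactly 'len' bytes are referenced. */
static void to_hw_sg_by_len(const struct sw_sg *sg, uint32_t len,
			    struct hw_sg *out)
{
	for (int i = 0; len; i++) {
		uint32_t n = sg[i].dma_len < len ? sg[i].dma_len : len;

		out[i].len = n;
		len -= n;
		out[i].final = (len == 0);
	}
}

int main(void)
{
	/* Three 512-byte buffers, but the request only covers 1280 bytes. */
	struct sw_sg sw[3] = { {512}, {512}, {512} };
	struct hw_sg hw[3];

	to_hw_sg_by_nents(sw, 3, hw);
	printf("by nents: last entry len = %u (256 bytes past the request)\n",
	       hw[2].len);

	to_hw_sg_by_len(sw, 1280, hw);
	printf("by len:   last entry len = %u (exactly the request)\n",
	       hw[2].len);
	return 0;
}

With the nents-based table, the engine would consume the extra 256 bytes
from the oversized final entry before ever reaching an entry appended
after it, which is why the helpers below are changed to take a byte count.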

Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 1fa6d053
drivers/crypto/caam/caamalg.c +17 −18
@@ -1284,37 +1284,36 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		       GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	int src_len, dst_len = 0;
 	struct aead_edesc *edesc;
 	int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
 	unsigned int authsize = ctx->authsize;
 
 	if (unlikely(req->dst != req->src)) {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen);
+		src_len = req->assoclen + req->cryptlen;
+		dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen);
+				src_len);
 			return ERR_PTR(src_nents);
 		}
 
-		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-					     req->cryptlen +
-						(encrypt ? authsize :
-							   (-authsize)));
+		dst_nents = sg_nents_for_len(req->dst, dst_len);
 		if (unlikely(dst_nents < 0)) {
 			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : (-authsize)));
+				dst_len);
 			return ERR_PTR(dst_nents);
 		}
 	} else {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize : 0));
+		src_len = req->assoclen + req->cryptlen +
+			  (encrypt ? authsize : 0);
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : 0));
+				src_len);
 			return ERR_PTR(src_nents);
 		}
 	}
@@ -1386,12 +1385,12 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 
 	sec4_sg_index = 0;
 	if (mapped_src_nents > 1) {
-		sg_to_sec4_sg_last(req->src, mapped_src_nents,
+		sg_to_sec4_sg_last(req->src, src_len,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 		sec4_sg_index += mapped_src_nents;
 	}
 	if (mapped_dst_nents > 1) {
-		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+		sg_to_sec4_sg_last(req->dst, dst_len,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 	}
 
@@ -1756,11 +1755,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
 	}
 	if (dst_sg_idx)
-		sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg +
+		sg_to_sec4_sg_last(req->src, req->cryptlen, edesc->sec4_sg +
 				   !!ivsize, 0);
 
 	if (mapped_dst_nents > 1) {
-		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+		sg_to_sec4_sg_last(req->dst, req->cryptlen,
 				   edesc->sec4_sg + dst_sg_idx, 0);
 	}
 
drivers/crypto/caam/caamalg_qi.c +17 −19
@@ -917,6 +917,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		       GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	int src_len, dst_len = 0;
 	struct aead_edesc *edesc;
 	dma_addr_t qm_sg_dma, iv_dma = 0;
 	int ivsize = 0;
@@ -938,13 +939,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	}
 
 	if (likely(req->src == req->dst)) {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen +
-						(encrypt ? authsize : 0));
+		src_len = req->assoclen + req->cryptlen +
+			  (encrypt ? authsize : 0);
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : 0));
+				src_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(src_nents);
 		}
@@ -957,23 +958,21 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 			return ERR_PTR(-ENOMEM);
 		}
 	} else {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen);
+		src_len = req->assoclen + req->cryptlen;
+		dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen);
+				src_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(src_nents);
 		}
 
-		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize :
-							(-authsize)));
+		dst_nents = sg_nents_for_len(req->dst, dst_len);
 		if (unlikely(dst_nents < 0)) {
 			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : (-authsize)));
+				dst_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(dst_nents);
 		}
@@ -1082,12 +1081,11 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
 		qm_sg_index++;
 	}
-	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
 	qm_sg_index += mapped_src_nents;
 
 	if (mapped_dst_nents > 1)
-		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-				 qm_sg_index, 0);
+		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
 
 	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
 	if (dma_mapping_error(qidev, qm_sg_dma)) {
@@ -1340,10 +1338,10 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	edesc->drv_req.drv_ctx = drv_ctx;
 
 	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+	sg_to_qm_sg_last(req->src, req->cryptlen, sg_table + 1, 0);
 
 	if (mapped_dst_nents > 1)
-		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+		sg_to_qm_sg_last(req->dst, req->cryptlen, sg_table +
 				 dst_sg_idx, 0);
 
 	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
drivers/crypto/caam/caamalg_qi2.c +29 −31
@@ -371,6 +371,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		      GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	int src_len, dst_len = 0;
 	struct aead_edesc *edesc;
 	dma_addr_t qm_sg_dma, iv_dma = 0;
 	int ivsize = 0;
@@ -387,23 +388,21 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	}
 
 	if (unlikely(req->dst != req->src)) {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen);
+		src_len = req->assoclen + req->cryptlen;
+		dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen);
+				src_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(src_nents);
 		}
 
-		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize :
-							(-authsize)));
+		dst_nents = sg_nents_for_len(req->dst, dst_len);
 		if (unlikely(dst_nents < 0)) {
 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : (-authsize)));
+				dst_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(dst_nents);
 		}
@@ -434,13 +433,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 			mapped_dst_nents = 0;
 		}
 	} else {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen +
-						(encrypt ? authsize : 0));
+		src_len = req->assoclen + req->cryptlen +
+			  (encrypt ? authsize : 0);
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : 0));
+				src_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(src_nents);
 		}
@@ -536,12 +535,11 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
 		qm_sg_index++;
 	}
-	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
 	qm_sg_index += mapped_src_nents;
 
 	if (mapped_dst_nents > 1)
-		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-				 qm_sg_index, 0);
+		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
 
 	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, qm_sg_dma)) {
@@ -1159,10 +1157,10 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 	edesc->qm_sg_bytes = qm_sg_bytes;
 
 	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+	sg_to_qm_sg_last(req->src, req->cryptlen, sg_table + 1, 0);
 
 	if (mapped_dst_nents > 1)
-		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+		sg_to_qm_sg_last(req->dst, req->cryptlen, sg_table +
 				 dst_sg_idx, 0);
 
 	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
@@ -3422,9 +3420,9 @@ static int ahash_update_ctx(struct ahash_request *req)
 
 	if (to_hash) {
 		struct dpaa2_sg_entry *sg_table;
+		int src_len = req->nbytes - *next_buflen;
 
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - (*next_buflen));
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(ctx->dev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -3465,7 +3463,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 			goto unmap_ctx;
 
 		if (mapped_nents) {
-			sg_to_qm_sg_last(req->src, mapped_nents,
+			sg_to_qm_sg_last(req->src, src_len,
 					 sg_table + qm_sg_src_index, 0);
 			if (*next_buflen)
 				scatterwalk_map_and_copy(next_buf, req->src,
@@ -3653,7 +3651,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	if (ret)
 		goto unmap_ctx;
 
-	sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
+	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
 
 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
 					  DMA_TO_DEVICE);
@@ -3739,7 +3737,7 @@ static int ahash_digest(struct ahash_request *req)
 		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
 
 		qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
-		sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+		sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
 						  qm_sg_bytes, DMA_TO_DEVICE);
 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
@@ -3882,9 +3880,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 
 	if (to_hash) {
 		struct dpaa2_sg_entry *sg_table;
+		int src_len = req->nbytes - *next_buflen;
 
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - *next_buflen);
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(ctx->dev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -3918,7 +3916,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		if (ret)
 			goto unmap_ctx;
 
-		sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+		sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
 
 		if (*next_buflen)
 			scatterwalk_map_and_copy(next_buf, req->src,
@@ -4037,7 +4035,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	if (ret)
 		goto unmap;
 
-	sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
 
 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
 					  DMA_TO_DEVICE);
@@ -4107,9 +4105,9 @@ static int ahash_update_first(struct ahash_request *req)
 
 	if (to_hash) {
 		struct dpaa2_sg_entry *sg_table;
+		int src_len = req->nbytes - *next_buflen;
 
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - (*next_buflen));
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(ctx->dev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -4144,7 +4142,7 @@ static int ahash_update_first(struct ahash_request *req)
 		if (mapped_nents > 1) {
 			int qm_sg_bytes;
 
-			sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+			sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
 			qm_sg_bytes = pad_sg_nents(mapped_nents) *
 				      sizeof(*sg_table);
 			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
drivers/crypto/caam/caamhash.c +7 −8
@@ -729,7 +729,7 @@ static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
 		unsigned int sgsize = sizeof(*sg) *
 				      pad_sg_nents(first_sg + nents);
 
-		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);
+		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);
 
 		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
 		if (dma_mapping_error(ctx->jrdev, src_dma)) {
@@ -788,9 +788,9 @@ static int ahash_update_ctx(struct ahash_request *req)
 
 	if (to_hash) {
 		int pad_nents;
+		int src_len = req->nbytes - *next_buflen;
 
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - (*next_buflen));
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(jrdev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -835,7 +835,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 			goto unmap_ctx;
 
 		if (mapped_nents)
-			sg_to_sec4_sg_last(req->src, mapped_nents,
+			sg_to_sec4_sg_last(req->src, src_len,
 					   edesc->sec4_sg + sec4_sg_src_index,
 					   0);
 		else
@@ -1208,9 +1208,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 
 	if (to_hash) {
 		int pad_nents;
+		int src_len = req->nbytes - *next_buflen;
 
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - *next_buflen);
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(jrdev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -1250,8 +1250,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		if (ret)
 			goto unmap_ctx;
 
-		sg_to_sec4_sg_last(req->src, mapped_nents,
-				   edesc->sec4_sg + 1, 0);
+		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
 
 		if (*next_buflen) {
 			scatterwalk_map_and_copy(next_buf, req->src,
drivers/crypto/caam/caampkc.c +2 −2
@@ -306,11 +306,11 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
 				   0);
 
 	if (sec4_sg_index)
-		sg_to_sec4_sg_last(req_ctx->fixup_src, src_nents,
+		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
 				   edesc->sec4_sg + !!diff_size, 0);
 
 	if (dst_nents > 1)
-		sg_to_sec4_sg_last(req->dst, dst_nents,
+		sg_to_sec4_sg_last(req->dst, req->dst_len,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 
 	/* Save nents for later use in Job Descriptor */