
Commit 5110e655 authored by Harsh Jain, committed by Herbert Xu

crypto: chelsio - Split hash requests for large scatter gather list



Send multiple work requests (WRs) to the hardware when the number of
entries in a scatter gather list cannot be sent in a single request.

Signed-off-by: Harsh Jain <harsh@chelsio.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 7ffb9118
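
Before the diff: the core of this change is the new helper chcr_hash_ent_in_wr(), which measures how many source bytes fit into the space left in one work request, plus the new chcr_ahash_continue(), which resubmits the remainder from the response handler until the whole request has been processed. Below is a minimal standalone sketch of that measuring walk, not the driver code (which appears in the diff); the names sg_ent, SLOT_BYTES, and bytes_in_one_wr are hypothetical, and CHUNK_MAX stands in for CHCR_SRC_SG_SIZE.

/*
 * Sketch only: walk a scatter list and report how many bytes fit in one
 * work request. Each WR has 'space' bytes free for SGL descriptors, each
 * descriptor covers at most CHUNK_MAX bytes, and 'skip' bytes at the head
 * of the list were already sent in earlier WRs (assumed to be no larger
 * than the first entry's length).
 */
#include <stddef.h>

#define CHUNK_MAX  2048	/* hypothetical, stands in for CHCR_SRC_SG_SIZE */
#define SLOT_BYTES 16	/* hypothetical rough cost of one SGL slot in a WR */

struct sg_ent {			/* hypothetical stand-in for scatterlist */
	size_t len;		/* bytes covered by this entry */
	struct sg_ent *next;	/* NULL terminates the list */
};

static size_t bytes_in_one_wr(const struct sg_ent *src, size_t space,
			      size_t skip)
{
	size_t total = 0, off = 0, slots = 0;

	while (src && (slots + 1) * SLOT_BYTES <= space) {
		size_t sless = src->len - off - skip;

		if (sless > CHUNK_MAX)
			sless = CHUNK_MAX;	/* one descriptor per chunk */
		total += sless;
		off += sless;
		slots++;
		if (off + skip == src->len) {	/* entry fully consumed */
			src = src->next;
			off = 0;
			skip = 0;
		}
	}
	return total;	/* send this much now; resume from here in the next WR */
}

The real helper additionally prices entries through the driver's sgl_ent_len[] table and honours a minimum entry count, but the shape of the walk, and the offset/skip bookkeeping that lets the next WR resume mid-list, is the same.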
Loading
Loading
Loading
Loading
drivers/crypto/chelsio/chcr_algo.c  +264 −94
@@ -131,6 +131,11 @@ static inline int is_ofld_imm(const struct sk_buff *skb)
 	return (skb->len <= SGE_MAX_WR_LEN);
 }
 
+static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
+{
+	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
+}
+
 static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
 			 unsigned int entlen,
 			 unsigned int skip)
@@ -165,6 +170,7 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
 					  int err)
 {
 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
 	int digestsize, updated_digestsize;
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
@@ -172,25 +178,43 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
 	if (input == NULL)
 		goto out;
 	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
-	if (reqctx->is_sg_map)
-		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
-	if (reqctx->dma_addr)
-		dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->dma_addr,
-				 reqctx->dma_len, DMA_TO_DEVICE);
-	reqctx->dma_addr = 0;
 	updated_digestsize = digestsize;
 	if (digestsize == SHA224_DIGEST_SIZE)
 		updated_digestsize = SHA256_DIGEST_SIZE;
 	else if (digestsize == SHA384_DIGEST_SIZE)
 		updated_digestsize = SHA512_DIGEST_SIZE;
-	if (reqctx->result == 1) {
-		reqctx->result = 0;
-		memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
-		       digestsize);
-	} else {
-		memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
-		       updated_digestsize);
+
+	if (hctx_wr->dma_addr) {
+		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
+				 hctx_wr->dma_len, DMA_TO_DEVICE);
+		hctx_wr->dma_addr = 0;
+	}
+	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
+				 req->nbytes)) {
+		if (hctx_wr->result == 1) {
+			hctx_wr->result = 0;
+			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
+			       digestsize);
+		} else {
+			memcpy(reqctx->partial_hash,
+			       input + sizeof(struct cpl_fw6_pld),
+			       updated_digestsize);
+
+		}
+		goto unmap;
 	}
+	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
+	       updated_digestsize);
+
+	err = chcr_ahash_continue(req);
+	if (err)
+		goto unmap;
+	return;
+unmap:
+	if (hctx_wr->is_sg_map)
+		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+
 out:
 	req->base.complete(&req->base, err);
 }
@@ -563,7 +587,6 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk,
 
 	if (!len)
 		return;
-
 	while (sg && skip) {
 		if (sg_dma_len(sg) <= skip) {
 			skip -= sg_dma_len(sg);
@@ -653,6 +676,35 @@ static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
 	}
 	return 0;
 }
 
+static int chcr_hash_ent_in_wr(struct scatterlist *src,
+			     unsigned int minsg,
+			     unsigned int space,
+			     unsigned int srcskip)
+{
+	int srclen = 0;
+	int srcsg = minsg;
+	int soffset = 0, sless;
+
+	if (sg_dma_len(src) == srcskip) {
+		src = sg_next(src);
+		srcskip = 0;
+	}
+	while (src && space > (sgl_ent_len[srcsg + 1])) {
+		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
+			      CHCR_SRC_SG_SIZE);
+		srclen += sless;
+		soffset += sless;
+		srcsg++;
+		if (sg_dma_len(src) == (soffset + srcskip)) {
+			src = sg_next(src);
+			soffset = 0;
+			srcskip = 0;
+		}
+	}
+	return srclen;
+}
+
 static int chcr_sg_ent_in_wr(struct scatterlist *src,
 			     struct scatterlist *dst,
 			     unsigned int minsg,
@@ -1153,7 +1205,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
 	}
 	if (!reqctx->imm) {
 		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
-					  SPACE_LEFT(ablkctx->enckey_len),
+					  CIP_SPACE_LEFT(ablkctx->enckey_len),
 					  reqctx->src_ofst, reqctx->dst_ofst);
 		if ((bytes + reqctx->processed) >= req->nbytes)
 			bytes  = req->nbytes - reqctx->processed;
@@ -1257,7 +1309,7 @@ static int process_cipher(struct ablkcipher_request *req,
 	if (!reqctx->imm) {
 		bytes = chcr_sg_ent_in_wr(req->src, req->dst,
 					  MIN_CIPHER_SG,
-					  SPACE_LEFT(ablkctx->enckey_len),
+					  CIP_SPACE_LEFT(ablkctx->enckey_len),
 					  0, 0);
 		if ((bytes + reqctx->processed) >= req->nbytes)
 			bytes  = req->nbytes - reqctx->processed;
@@ -1513,35 +1565,24 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
 	struct chcr_wr *chcr_req;
 	struct ulptx_sgl *ulptx;
-	unsigned int nents = 0, transhdr_len, iopad_alignment = 0;
-	unsigned int digestsize = crypto_ahash_digestsize(tfm);
-	unsigned int kctx_len = 0, temp = 0;
-	u8 hash_size_in_response = 0;
+	unsigned int nents = 0, transhdr_len;
+	unsigned int temp = 0;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
 		GFP_ATOMIC;
 	struct adapter *adap = padap(h_ctx(tfm)->dev);
 	int error = 0;
 
-	iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
-	kctx_len = param->alg_prm.result_size + iopad_alignment;
-	if (param->opad_needed)
-		kctx_len += param->alg_prm.result_size + iopad_alignment;
-
-	if (req_ctx->result)
-		hash_size_in_response = digestsize;
-	else
-		hash_size_in_response = param->alg_prm.result_size;
-	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
-	req_ctx->imm = (transhdr_len + param->bfr_len + param->sg_len) <=
-		SGE_MAX_WR_LEN;
-	nents = sg_nents_xlen(req->src, param->sg_len, CHCR_SRC_SG_SIZE, 0);
+	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
+	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
+				param->sg_len) <= SGE_MAX_WR_LEN;
+	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
+			      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
 	nents += param->bfr_len ? 1 : 0;
-	transhdr_len += req_ctx->imm ? roundup((param->bfr_len +
-			param->sg_len), 16) :
-			(sgl_len(nents) * 8);
+	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
+				param->sg_len, 16) : (sgl_len(nents) * 8);
 	transhdr_len = roundup(transhdr_len, 16);
 
-	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+	skb = alloc_skb(transhdr_len, flags);
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 	chcr_req = __skb_put_zero(skb, transhdr_len);
@@ -1573,33 +1614,33 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
 	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
 					    param->alg_prm.mk_size, 0,
 					    param->opad_needed,
-					    ((kctx_len +
+					    ((param->kctx_len +
 					     sizeof(chcr_req->key_ctx)) >> 4));
 	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
-	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + kctx_len +
+	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
 				     DUMMY_BYTES);
 	if (param->bfr_len != 0) {
-		req_ctx->dma_addr = dma_map_single(&u_ctx->lldi.pdev->dev,
-					  req_ctx->reqbfr, param->bfr_len,
-					  DMA_TO_DEVICE);
+		req_ctx->hctx_wr.dma_addr =
+			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
+				       param->bfr_len, DMA_TO_DEVICE);
 		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
-				       req_ctx->dma_addr)) {
+				       req_ctx->hctx_wr.dma_addr)) {
 			error = -ENOMEM;
 			goto err;
 		}
-		req_ctx->dma_len = param->bfr_len;
+		req_ctx->hctx_wr.dma_len = param->bfr_len;
 	} else {
-		req_ctx->dma_addr = 0;
+		req_ctx->hctx_wr.dma_addr = 0;
 	}
 	chcr_add_hash_src_ent(req, ulptx, param);
 	/* Request upto max wr size */
-	temp = kctx_len + DUMMY_BYTES + (req_ctx->imm ? (param->sg_len
-					+ param->bfr_len) : 0);
+	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
+				(param->sg_len + param->bfr_len) : 0);
 	atomic_inc(&adap->chcr_stats.digest_rqst);
-	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->imm,
-		    hash_size_in_response, transhdr_len,
+	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
+		    param->hash_size, transhdr_len,
 		    temp,  0);
-	req_ctx->skb = skb;
+	req_ctx->hctx_wr.skb = skb;
 	return skb;
 err:
 	kfree_skb(skb);
@@ -1618,7 +1659,6 @@ static int chcr_ahash_update(struct ahash_request *req)
 	int error;
 
 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
-
 	u_ctx = ULD_CTX(h_ctx(rtfm));
 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
 					    h_ctx(rtfm)->tx_qidx))) {
@@ -1635,17 +1675,26 @@ static int chcr_ahash_update(struct ahash_request *req)
 		req_ctx->reqlen += nbytes;
 		return 0;
 	}
+	chcr_init_hctx_per_wr(req_ctx);
 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
 	if (error)
 		return -ENOMEM;
+	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+	params.kctx_len = roundup(params.alg_prm.result_size, 16);
+	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
+				     HASH_SPACE_LEFT(params.kctx_len), 0);
+	if (params.sg_len > req->nbytes)
+		params.sg_len = req->nbytes;
+	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
+			req_ctx->reqlen;
 	params.opad_needed = 0;
 	params.more = 1;
 	params.last = 0;
-	params.sg_len = nbytes - req_ctx->reqlen;
 	params.bfr_len = req_ctx->reqlen;
 	params.scmd1 = 0;
-	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
-	req_ctx->result = 0;
+	req_ctx->hctx_wr.srcsg = req->src;
+
+	params.hash_size = params.alg_prm.result_size;
 	req_ctx->data_len += params.sg_len + params.bfr_len;
 	skb = create_hash_wr(req, &params);
 	if (IS_ERR(skb)) {
@@ -1653,6 +1702,7 @@ static int chcr_ahash_update(struct ahash_request *req)
		goto unmap;
		goto unmap;
	}
	}


	req_ctx->hctx_wr.processed += params.sg_len;
	if (remainder) {
	if (remainder) {
		/* Swap buffers */
		/* Swap buffers */
		swap(req_ctx->reqbfr, req_ctx->skbfr);
		swap(req_ctx->reqbfr, req_ctx->skbfr);
@@ -1690,16 +1740,27 @@ static int chcr_ahash_final(struct ahash_request *req)
 	struct uld_ctx *u_ctx = NULL;
 	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
 
+	chcr_init_hctx_per_wr(req_ctx);
 	u_ctx = ULD_CTX(h_ctx(rtfm));
 	if (is_hmac(crypto_ahash_tfm(rtfm)))
 		params.opad_needed = 1;
 	else
 		params.opad_needed = 0;
 	params.sg_len = 0;
+	req_ctx->hctx_wr.isfinal = 1;
 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
-	req_ctx->result = 1;
+	params.kctx_len = roundup(params.alg_prm.result_size, 16);
+	if (is_hmac(crypto_ahash_tfm(rtfm))) {
+		params.opad_needed = 1;
+		params.kctx_len *= 2;
+	} else {
+		params.opad_needed = 0;
+	}
+
+	req_ctx->hctx_wr.result = 1;
 	params.bfr_len = req_ctx->reqlen;
 	req_ctx->data_len += params.bfr_len + params.sg_len;
+	req_ctx->hctx_wr.srcsg = req->src;
 	if (req_ctx->reqlen == 0) {
 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
 		params.last = 0;
@@ -1712,16 +1773,81 @@ static int chcr_ahash_final(struct ahash_request *req)
 		params.last = 1;
 		params.more = 0;
 	}
+	params.hash_size = crypto_ahash_digestsize(rtfm);
 	skb = create_hash_wr(req, &params);
 	if (IS_ERR(skb))
 		return PTR_ERR(skb);
-
+	req_ctx->reqlen = 0;
 	skb->dev = u_ctx->lldi.ports[0];
 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
 	chcr_send_wr(skb);
 	return -EINPROGRESS;
 }
 
+static int chcr_ahash_continue(struct ahash_request *req)
+{
+	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
+	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+	struct uld_ctx *u_ctx = NULL;
+	struct sk_buff *skb;
+	struct hash_wr_param params;
+	u8  bs;
+	int error;
+
+	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+	u_ctx = ULD_CTX(h_ctx(rtfm));
+	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+					    h_ctx(rtfm)->tx_qidx))) {
+		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+			return -EBUSY;
+	}
+	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+	params.kctx_len = roundup(params.alg_prm.result_size, 16);
+	if (is_hmac(crypto_ahash_tfm(rtfm))) {
+		params.kctx_len *= 2;
+		params.opad_needed = 1;
+	} else {
+		params.opad_needed = 0;
+	}
+	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
+					    HASH_SPACE_LEFT(params.kctx_len),
+					    hctx_wr->src_ofst);
+	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
+		params.sg_len = req->nbytes - hctx_wr->processed;
+	if (!hctx_wr->result ||
+	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
+		if (is_hmac(crypto_ahash_tfm(rtfm))) {
+			params.kctx_len /= 2;
+			params.opad_needed = 0;
+		}
+		params.last = 0;
+		params.more = 1;
+		params.sg_len = rounddown(params.sg_len, bs);
+		params.hash_size = params.alg_prm.result_size;
+		params.scmd1 = 0;
+	} else {
+		params.last = 1;
+		params.more = 0;
+		params.hash_size = crypto_ahash_digestsize(rtfm);
+		params.scmd1 = reqctx->data_len + params.sg_len;
+	}
+	params.bfr_len = 0;
+	reqctx->data_len += params.sg_len;
+	skb = create_hash_wr(req, &params);
+	if (IS_ERR(skb)) {
+		error = PTR_ERR(skb);
+		goto err;
+	}
+	hctx_wr->processed += params.sg_len;
+	skb->dev = u_ctx->lldi.ports[0];
+	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+	chcr_send_wr(skb);
+	return 0;
+err:
+	return error;
+}
+
 static int chcr_ahash_finup(struct ahash_request *req)
 {
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
@@ -1740,37 +1866,59 @@ static int chcr_ahash_finup(struct ahash_request *req)
 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
 			return -EBUSY;
 	}
+	chcr_init_hctx_per_wr(req_ctx);
+	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+	if (error)
+		return -ENOMEM;
 
-	if (is_hmac(crypto_ahash_tfm(rtfm)))
+	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+	params.kctx_len = roundup(params.alg_prm.result_size, 16);
+	if (is_hmac(crypto_ahash_tfm(rtfm))) {
+		params.kctx_len *= 2;
 		params.opad_needed = 1;
-	else
+	} else {
 		params.opad_needed = 0;
+	}
 
-	params.sg_len = req->nbytes;
+	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
+				    HASH_SPACE_LEFT(params.kctx_len), 0);
+	if (params.sg_len < req->nbytes) {
+		if (is_hmac(crypto_ahash_tfm(rtfm))) {
+			params.kctx_len /= 2;
+			params.opad_needed = 0;
+		}
+		params.last = 0;
+		params.more = 1;
+		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
+					- req_ctx->reqlen;
+		params.hash_size = params.alg_prm.result_size;
+		params.scmd1 = 0;
+	} else {
+		params.last = 1;
+		params.more = 0;
+		params.sg_len = req->nbytes;
+		params.hash_size = crypto_ahash_digestsize(rtfm);
+		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
+				params.sg_len;
+	}
 	params.bfr_len = req_ctx->reqlen;
-	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
 	req_ctx->data_len += params.bfr_len + params.sg_len;
-	req_ctx->result = 1;
+	req_ctx->hctx_wr.result = 1;
+	req_ctx->hctx_wr.srcsg = req->src;
 	if ((req_ctx->reqlen + req->nbytes) == 0) {
 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
 		params.last = 0;
 		params.more = 1;
 		params.scmd1 = 0;
 		params.bfr_len = bs;
-	} else {
-		params.scmd1 = req_ctx->data_len;
-		params.last = 1;
-		params.more = 0;
 	}
-	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
-	if (error)
-		return -ENOMEM;
-
 	skb = create_hash_wr(req, &params);
 	if (IS_ERR(skb)) {
 		error = PTR_ERR(skb);
 		goto unmap;
 	}
+	req_ctx->reqlen = 0;
+	req_ctx->hctx_wr.processed += params.sg_len;
 	skb->dev = u_ctx->lldi.ports[0];
 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
 	chcr_send_wr(skb);
@@ -1801,21 +1949,42 @@ static int chcr_ahash_digest(struct ahash_request *req)
 			return -EBUSY;
 	}
 
-	if (is_hmac(crypto_ahash_tfm(rtfm)))
-		params.opad_needed = 1;
-	else
-		params.opad_needed = 0;
+	chcr_init_hctx_per_wr(req_ctx);
 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
 	if (error)
 		return -ENOMEM;
 
-	params.last = 0;
-	params.more = 0;
-	params.sg_len = req->nbytes;
+	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+	params.kctx_len = roundup(params.alg_prm.result_size, 16);
+	if (is_hmac(crypto_ahash_tfm(rtfm))) {
+		params.kctx_len *= 2;
+		params.opad_needed = 1;
+	} else {
+		params.opad_needed = 0;
+	}
+	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
+				HASH_SPACE_LEFT(params.kctx_len), 0);
+	if (params.sg_len < req->nbytes) {
+		if (is_hmac(crypto_ahash_tfm(rtfm))) {
+			params.kctx_len /= 2;
+			params.opad_needed = 0;
+		}
+		params.last = 0;
+		params.more = 1;
+		params.scmd1 = 0;
+		params.sg_len = rounddown(params.sg_len, bs);
+		params.hash_size = params.alg_prm.result_size;
+	} else {
+		params.sg_len = req->nbytes;
+		params.hash_size = crypto_ahash_digestsize(rtfm);
+		params.last = 1;
+		params.more = 0;
+		params.scmd1 = req->nbytes + req_ctx->data_len;
+
+	}
 	params.bfr_len = 0;
-	params.scmd1 = 0;
-	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
-	req_ctx->result = 1;
+	req_ctx->hctx_wr.result = 1;
+	req_ctx->hctx_wr.srcsg = req->src;
 	req_ctx->data_len += params.bfr_len + params.sg_len;
 
 	if (req->nbytes == 0) {
@@ -1829,6 +1998,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
 		error = PTR_ERR(skb);
 		goto unmap;
 	}
+	req_ctx->hctx_wr.processed += params.sg_len;
 	skb->dev = u_ctx->lldi.ports[0];
 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
 	chcr_send_wr(skb);
@@ -1845,11 +2015,10 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
 
 	state->reqlen = req_ctx->reqlen;
 	state->data_len = req_ctx->data_len;
-	state->is_sg_map = 0;
-	state->result = 0;
 	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
 	memcpy(state->partial_hash, req_ctx->partial_hash,
 	       CHCR_HASH_MAX_DIGEST_SIZE);
+	chcr_init_hctx_per_wr(state);
 	return 0;
 }
@@ -1862,11 +2031,10 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
 	req_ctx->data_len = state->data_len;
 	req_ctx->reqbfr = req_ctx->bfr1;
 	req_ctx->skbfr = req_ctx->bfr2;
-	req_ctx->is_sg_map = 0;
-	req_ctx->result = 0;
 	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
 	memcpy(req_ctx->partial_hash, state->partial_hash,
 	       CHCR_HASH_MAX_DIGEST_SIZE);
+	chcr_init_hctx_per_wr(req_ctx);
 	return 0;
 }
@@ -1963,10 +2131,8 @@ static int chcr_sha_init(struct ahash_request *areq)
 	req_ctx->reqlen = 0;
 	req_ctx->reqbfr = req_ctx->bfr1;
 	req_ctx->skbfr = req_ctx->bfr2;
-	req_ctx->skb = NULL;
-	req_ctx->result = 0;
-	req_ctx->is_sg_map = 0;
 	copy_hash_init_values(req_ctx->partial_hash, digestsize);
+
 	return 0;
 }
@@ -2407,22 +2573,26 @@ void chcr_add_hash_src_ent(struct ahash_request *req,
 	struct ulptx_walk ulp_walk;
 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
 
-	if (reqctx->imm) {
+	if (reqctx->hctx_wr.imm) {
 		u8 *buf = (u8 *)ulptx;
 
 		if (param->bfr_len) {
 			memcpy(buf, reqctx->reqbfr, param->bfr_len);
 			buf += param->bfr_len;
 		}
-		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
-				   buf, param->sg_len, 0);
+
+		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
+				   sg_nents(reqctx->hctx_wr.srcsg), buf,
+				   param->sg_len, 0);
 	} else {
 		ulptx_walk_init(&ulp_walk, ulptx);
 		if (param->bfr_len)
 			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
-					    &reqctx->dma_addr);
-		ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len,
-				  0);
+					    &reqctx->hctx_wr.dma_addr);
+		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
+				  param->sg_len, reqctx->hctx_wr.src_ofst);
+		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
+		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
 		ulptx_walk_end(&ulp_walk);
 	}
 }
@@ -2439,7 +2609,7 @@ int chcr_hash_dma_map(struct device *dev,
 			   DMA_TO_DEVICE);
 	if (!error)
 		return -ENOMEM;
-	req_ctx->is_sg_map = 1;
+	req_ctx->hctx_wr.is_sg_map = 1;
 	return 0;
 }
@@ -2453,7 +2623,7 @@ void chcr_hash_dma_unmap(struct device *dev,
 
 	dma_unmap_sg(dev, req->src, sg_nents(req->src),
 			   DMA_TO_DEVICE);
-	req_ctx->is_sg_map = 0;
+	req_ctx->hctx_wr.is_sg_map = 0;
 
 }

drivers/crypto/chelsio/chcr_algo.h  +7 −3
@@ -222,8 +222,10 @@
 #define MIN_GCM_SG			1 /* IV */
 #define MIN_DIGEST_SG			1 /*Partial Buffer*/
 #define MIN_CCM_SG			2 /*IV+B0*/
-#define SPACE_LEFT(len) \
-	((SGE_MAX_WR_LEN - WR_MIN_LEN - (len)))
+#define CIP_SPACE_LEFT(len) \
+	((SGE_MAX_WR_LEN - CIP_WR_MIN_LEN - (len)))
+#define HASH_SPACE_LEFT(len) \
+	((SGE_MAX_WR_LEN - HASH_WR_MIN_LEN - (len)))
 
 struct algo_param {
 	unsigned int auth_mode;
@@ -232,12 +234,14 @@ struct algo_param {
 };
 
 struct hash_wr_param {
+	struct algo_param alg_prm;
 	unsigned int opad_needed;
 	unsigned int more;
 	unsigned int last;
-	struct algo_param alg_prm;
+	unsigned int kctx_len;
 	unsigned int sg_len;
 	unsigned int bfr_len;
+	unsigned int hash_size;
 	u64 scmd1;
 };

drivers/crypto/chelsio/chcr_core.h  +5 −1
@@ -54,10 +54,14 @@
 #define MAC_ERROR_BIT		0
 #define CHK_MAC_ERR_BIT(x)	(((x) >> MAC_ERROR_BIT) & 1)
 #define MAX_SALT                4
-#define WR_MIN_LEN (sizeof(struct chcr_wr) + \
+#define CIP_WR_MIN_LEN (sizeof(struct chcr_wr) + \
 		    sizeof(struct cpl_rx_phys_dsgl) + \
 		    sizeof(struct ulptx_sgl))
 
+#define HASH_WR_MIN_LEN (sizeof(struct chcr_wr) + \
+			DUMMY_BYTES + \
+		    sizeof(struct ulptx_sgl))
+
 #define padap(dev) pci_get_drvdata(dev->u_ctx->lldi.pdev)
 
 struct uld_ctx;

drivers/crypto/chelsio/chcr_crypto.h  +22 −10
@@ -258,21 +258,32 @@ struct chcr_context {
 	struct __crypto_ctx crypto_ctx[0];
 };
 
-struct chcr_ahash_req_ctx {
+struct chcr_hctx_per_wr {
+	struct scatterlist *srcsg;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+	u32 dma_len;
+	unsigned int src_ofst;
+	unsigned int processed;
 	u32 result;
-	u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128];
-	u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
+	u8 is_sg_map;
+	u8 imm;
+	/*Final callback called. Driver cannot rely on nbytes to decide
+	 * final call
+	 */
+	u8 isfinal;
+};
+
+struct chcr_ahash_req_ctx {
+	struct chcr_hctx_per_wr hctx_wr;
 	u8 *reqbfr;
 	u8 *skbfr;
-	dma_addr_t dma_addr;
-	u32 dma_len;
+	/* SKB which is being sent to the hardware for processing */
+	u64 data_len;  /* Data len till time */
 	u8 reqlen;
-	u8 imm;
-	u8 is_sg_map;
 	u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
-	u64 data_len;  /* Data len till time */
-	/* SKB which is being sent to the hardware for processing */
-	struct sk_buff *skb;
+	u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128];
+	u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
 };
 
 struct chcr_blkcipher_req_ctx {
@@ -329,4 +340,5 @@ void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx,
 			   struct hash_wr_param *param);
 int chcr_hash_dma_map(struct device *dev, struct ahash_request *req);
 void chcr_hash_dma_unmap(struct device *dev, struct ahash_request *req);
+static int chcr_ahash_continue(struct ahash_request *req);
 #endif /* __CHCR_CRYPTO_H__ */