
Commit f0d03dea authored by Uri Simchoni, committed by Herbert Xu

crypto: mv_cesa - Make the copy-back of data optional



Make the copy-back of data optional (not done in hashing requests)

Signed-off-by: Uri Simchoni <uri@jdland.co.il>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent a58094ac
+27 −22
@@ -40,6 +40,7 @@ enum engine_status {
  * @src_start:		offset to add to src start position (scatter list)
  * @crypt_len:		length of current crypt process
  * @hw_nbytes:		total bytes to process in hw for this request
+ * @copy_back:		whether to copy data back (crypt) or not (hash)
  * @sg_dst_left:	bytes left dst to process in this scatter list
  * @dst_start:		offset to add to dst start position (scatter list)
  * @hw_processed_bytes:	number of bytes processed by hw (request).
@@ -60,6 +61,7 @@ struct req_progress {
	int crypt_len;
	int crypt_len;
	int hw_nbytes;
	int hw_nbytes;
	/* dst mostly */
	/* dst mostly */
	int copy_back;
	int sg_dst_left;
	int sg_dst_left;
	int dst_start;
	int dst_start;
	int hw_processed_bytes;
	int hw_processed_bytes;
@@ -267,10 +269,10 @@ static void dequeue_complete_req(void)
 	struct crypto_async_request *req = cpg->cur_req;
 	void *buf;
 	int ret;
+	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
+	if (cpg->p.copy_back) {
 	int need_copy_len = cpg->p.crypt_len;
 	int sram_offset = 0;
-
-	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
 	do {
 		int dst_copy;
 
@@ -294,6 +296,8 @@ static void dequeue_complete_req(void)
 		need_copy_len -= dst_copy;
 		cpg->p.dst_start += dst_copy;
 	} while (need_copy_len > 0);
+	}
+
 
 	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
 	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
@@ -336,6 +340,7 @@ static void mv_enqueue_new_req(struct ablkcipher_request *req)
 	p->hw_nbytes = req->nbytes;
 	p->complete = mv_crypto_algo_completion;
 	p->process = mv_process_current_q;
+	p->copy_back = 1;
 
 	num_sgs = count_sgs(req->src, req->nbytes);
 	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
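
To illustrate the intent of the new flag: the cipher enqueue path above sets p->copy_back = 1, so dequeue_complete_req() copies the engine's SRAM output back into the destination scatterlist, while a hashing request would leave the flag at 0 because a hash produces no bulk output to copy. Below is a minimal, self-contained sketch of that split; the names (fake_progress, complete_request) are hypothetical stand-ins rather than the driver's real structures.

/*
 * Standalone model of the copy_back idea from this commit (hypothetical
 * names, not the mv_cesa driver itself): the request-progress state keeps
 * a copy_back flag; cipher requests set it so the result is copied out of
 * the engine's output buffer, hash requests leave it clear because their
 * digest is read out separately.
 */
#include <stdio.h>
#include <string.h>

struct fake_progress {
	int copy_back;            /* 1 for crypt requests, 0 for hash requests */
	unsigned char sram[16];   /* stands in for the engine's SRAM window */
};

/* Mimics the tail of dequeue_complete_req(): copy back only if requested. */
static void complete_request(struct fake_progress *p,
			     unsigned char *dst, size_t len)
{
	if (p->copy_back)
		memcpy(dst, p->sram, len);   /* crypt: hand ciphertext back */
	/* hash: nothing to copy, the digest is collected elsewhere */
}

int main(void)
{
	struct fake_progress crypt = { .copy_back = 1 };
	struct fake_progress hash  = { .copy_back = 0 };
	unsigned char out[16] = { 0 };

	memset(crypt.sram, 0xab, sizeof(crypt.sram));
	memset(hash.sram, 0xcd, sizeof(hash.sram));

	complete_request(&crypt, out, sizeof(out));
	printf("after crypt completion: out[0] = 0x%02x\n", out[0]); /* 0xab */

	complete_request(&hash, out, sizeof(out));
	printf("after hash completion:  out[0] = 0x%02x\n", out[0]); /* still 0xab */
	return 0;
}

Built with any C compiler, the second call leaves out[] untouched, which is the behavior the copy_back guard provides for the hashing path.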