Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4e5fa2b6 authored by Kranthikumar Kurapati's avatar Kranthikumar Kurapati Committed by Gerrit - the friendly Code Review server
Browse files

crypto: msm: qce50 driver multiple request enhancement



qce50 driver is enhanced to support multiple outstanding requests. The
ce_hw_support reports the maximum number of outstanding requests driver
can support. SPS interrupts are cut down. Under heavy traffic conditions,
SPS_IOVEC_FLAG_INT is only set in the producer pipe descriptor
for every MAX_BUNCH_MODE_REQ + 1 requests to save interrupts.

This enhancement is only for crypto v5.3 or above. For platform with
older devices, the existing process is still maintained in the driver
to provide compatibility with old devices. The maximum number of
outstanding requests is reported as 1 then.

The previous workaround code to retrieve the MAC_FAILED status bit from
the crypto hardware status register for CCM is not going to work after
the multi-request support enhancement. Rework the workaround.
A dummy request is added at end of CCM decrypt. The MAC_FAILED status
can be retrieved from the result dump of dummy request.

qcrypto driver is changed to submit as many requests as possible, up
to maximum number of outstanding requests reported from low level crypto
driver.

Low level crypto driver internal statistics are added for debugging.

Change-Id: Ib66032a5a20db95be1b5beac0b0cb0d048416b43
Acked-by: default avatarChe-Min Hsieh <cheminh@qti.qualcomm.com>
Signed-off-by: default avatarKranthikumar Kurapati <kkurap@codeaurora.org>
parent 4835df40
Loading
Loading
Loading
Loading
+12 −1
Original line number Diff line number Diff line
/* Qualcomm Crypto Engine driver.
 *
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 * Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -1935,6 +1935,16 @@ static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req,
struct qce_pm_table qce_pm_table = {NULL, NULL};
EXPORT_SYMBOL(qce_pm_table);

/*
 * Stub for the driver-statistics dump API (see qce50 for the real
 * implementation). Kept here so callers link against a uniform qce
 * interface; this legacy backend tracks no statistics.
 */
void qce_get_driver_stats(void *handle)
{
}
EXPORT_SYMBOL(qce_get_driver_stats);

/*
 * Stub for the driver-statistics reset API. No-op in this legacy
 * backend, which keeps no internal statistics; exported so the qcrypto
 * layer can call it unconditionally on any qce handle.
 */
void qce_clear_driver_stats(void *handle)
{
}
EXPORT_SYMBOL(qce_clear_driver_stats);

int qce_aead_req(void *handle, struct qce_req *q_req)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
@@ -2389,6 +2399,7 @@ int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
	ce_support->aligned_only = false;
	ce_support->is_shared = false;
	ce_support->bam = false;
	ce_support->max_request = 1;
	return 0;
}
EXPORT_SYMBOL(qce_hw_support);
+4 −1
Original line number Diff line number Diff line
/* Qualcomm Crypto Engine driver API
 *
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 * Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -125,6 +125,7 @@ struct ce_hw_support {
	bool clk_mgmt_sus_res;
	unsigned int ce_device;
	unsigned int ce_hw_instance;
	unsigned int max_request;
};

/* Sha operation parameters */
@@ -183,5 +184,7 @@ int qce_hw_support(void *handle, struct ce_hw_support *support);
int qce_process_sha_req(void *handle, struct qce_sha_req *s_req);
int qce_enable_clk(void *handle);
int qce_disable_clk(void *handle);
void qce_get_driver_stats(void *handle);
void qce_clear_driver_stats(void *handle);

#endif /* __CRYPTO_MSM_QCE_H */
+1332 −773

File changed.

Preview size limit exceeded, changes collapsed.

+53 −18
Original line number Diff line number Diff line
@@ -46,6 +46,15 @@ enum qce_pipe_st_enum {
	QCE_PIPE_STATE_LAST
};

/* Kind of transfer an outstanding crypto request carries. */
enum qce_xfer_type_enum {
	QCE_XFER_HASHING,	/* sha/hash request */
	QCE_XFER_CIPHERING,	/* cipher (ablkcipher) request */
	QCE_XFER_AEAD,		/* combined cipher + auth request */
	QCE_XFER_F8,		/* OTA F8 ciphering */
	QCE_XFER_F9,		/* OTA F9 integrity */
	QCE_XFER_TYPE_LAST	/* sentinel: number of transfer types */
};

struct qce_sps_ep_conn_data {
	struct sps_pipe			*pipe;
	struct sps_connect		connect;
@@ -114,6 +123,7 @@ struct qce_cmdlistptr_ops {
	struct qce_cmdlist_info aead_hmac_sha256_cbc_3des;
	struct qce_cmdlist_info aead_aes_128_ccm;
	struct qce_cmdlist_info aead_aes_256_ccm;
	struct qce_cmdlist_info cipher_null;
	struct qce_cmdlist_info f8_kasumi;
	struct qce_cmdlist_info f8_snow3g;
	struct qce_cmdlist_info f9_kasumi;
@@ -165,18 +175,10 @@ struct qce_ce_cfg_reg_setting {
	uint32_t auth_cfg_snow3g;
};

/* DM data structure with buffers, commandlists & command pointer lists */
struct ce_sps_data {

struct ce_bam_info {
	uint32_t			bam_irq;
	uint32_t			bam_mem;
	void __iomem			*bam_iobase;

	struct qce_sps_ep_conn_data	producer;
	struct qce_sps_ep_conn_data	consumer;
	struct sps_event_notify		notify;
	struct scatterlist		*src;
	struct scatterlist		*dst;
	uint32_t			ce_device;
	uint32_t			ce_hw_instance;
	uint32_t			bam_ee;
@@ -184,22 +186,55 @@ struct ce_sps_data {
	unsigned int			src_pipe_index;
	unsigned int			dest_pipe_index;
	unsigned long			bam_handle;
	int				ce_burst_size;
	uint32_t			minor_version;
	struct qce_sps_ep_conn_data	producer;
	struct qce_sps_ep_conn_data	consumer;
};

	enum qce_pipe_st_enum consumer_state;	/* Consumer pipe state */
/* SPS data structure with buffers, commandlists & command pointer lists */
struct ce_sps_data {
	enum qce_pipe_st_enum producer_state;	/* Producer pipe state */

	int consumer_status;		/* consumer pipe status */
	int producer_status;		/* producer pipe status */

	struct sps_transfer in_transfer;
	struct sps_transfer out_transfer;
	struct qce_cmdlistptr_ops cmdlistptr;
	uint32_t result_dump; /* result dump virtual address */
	uint32_t result_dump_null;
	uint32_t result_dump_phy; /* result dump physical address (32 bits) */
	uint32_t result_dump_null_phy;

	uint32_t ignore_buffer; /* ignore buffer virtual address */
	struct ce_result_dump_format *result; /* pointer to result dump */
	struct ce_result_dump_format *result_null;
};

	int ce_burst_size;
struct ce_request_info {
	bool in_use;
	bool in_prog;
	enum qce_xfer_type_enum	xfer_type;
	struct ce_sps_data ce_sps;
	qce_comp_func_ptr_t qce_cb;	/* qce callback function pointer */
	void *user;
	void *areq;
	int assoc_nents;
	int src_nents;
	int dst_nents;
	dma_addr_t phy_iv_in;
	unsigned char dec_iv[16];
	int dir;
	enum qce_cipher_mode_enum mode;
	dma_addr_t phy_ota_src;
	dma_addr_t phy_ota_dst;
	unsigned int ota_size;
};

	struct qce_cmdlistptr_ops cmdlistptr;
	uint32_t result_dump;
	uint32_t ignore_buffer;
	struct ce_result_dump_format *result;
	uint32_t minor_version;
struct qce_driver_stats {
	int no_of_timeouts;
	int no_of_dummy_reqs;
	int current_mode;
	int outstanding_reqs;
};

#endif /* _DRIVERS_CRYPTO_MSM_QCE50_H */
+203 −54
Original line number Diff line number Diff line
@@ -52,6 +52,7 @@

#define DEBUG_MAX_FNAME  16
#define DEBUG_MAX_RW_BUF 2048
#define QCRYPTO_BIG_NUMBER 9999999 /* a big number */

/*
 * For crypto 5.0 which has burst size alignment requirement.
@@ -125,13 +126,18 @@ static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
static bool _qcrypto_init_assign;
struct crypto_priv;
/*
 * One slot of the per-engine request pool; an engine owns max_req of
 * these, one per outstanding request it can have in flight.
 */
struct qcrypto_req_control {
	unsigned int index;	/* position of this slot in the pool */
	bool in_use;	/* slot claimed; toggled atomically via xchg() */
	struct crypto_engine *pce;	/* engine that owns this slot */
	struct crypto_async_request *req;	/* active async request */
	struct qcrypto_resp_ctx *arsp;	/* response ctx paired with req */
};

struct crypto_engine {
	struct list_head elist;
	void *qce; /* qce handle */
	struct platform_device *pdev; /* platform device */
	struct crypto_async_request *req; /* current active request */
	struct qcrypto_resp_ctx *arsp;    /* rsp associcated with req */
	int res;                          /* execution result */
	struct crypto_priv *pcp;
	uint32_t  bus_scale_handle;
	struct crypto_queue req_queue;	/*
@@ -158,6 +164,10 @@ struct crypto_engine {
	u32    last_active_seq;

	bool   check_flag;
	/*Added to support multi-requests*/
	unsigned int max_req;
	struct   qcrypto_req_control *preq_pool;
	atomic_t req_count;
};

struct crypto_priv {
@@ -203,6 +213,65 @@ struct crypto_priv {
static struct crypto_priv qcrypto_dev;
static struct crypto_engine *_qcrypto_static_assign_engine(
					struct crypto_priv *cp);
static struct crypto_engine *_avail_eng(struct crypto_priv *cp);

/*
 * Claim a free request slot from the engine's pool.
 *
 * Scans the pool and atomically grabs the first slot whose in_use flag
 * flips false -> true via xchg(), bumping the engine's outstanding
 * request count. Returns the claimed slot, or NULL when every one of
 * the engine's max_req slots is busy.
 */
static struct qcrypto_req_control *qcrypto_alloc_req_control(
						struct crypto_engine *pce)
{
	struct qcrypto_req_control *slot = pce->preq_pool;
	int n;

	for (n = 0; n < pce->max_req; n++, slot++) {
		/* xchg() returning false means we won this free slot */
		if (xchg(&slot->in_use, true) == false) {
			atomic_inc(&pce->req_count);
			return slot;
		}
	}
	return NULL;
}

/*
 * Return a request slot to its engine's pool.
 *
 * Atomically clears in_use; warns (and does nothing else) if the slot
 * was already free, guarding against a double release. On a genuine
 * release the request/response pointers are cleared and the engine's
 * outstanding request count is decremented.
 */
static void qcrypto_free_req_control(struct crypto_engine *pce,
					struct qcrypto_req_control *preq)
{
	if (xchg(&preq->in_use, false) == false) {
		pr_warn("request info %p free already\n", preq);
		return;
	}
	preq->req = NULL;
	preq->arsp = NULL;
	atomic_dec(&pce->req_count);
}

/*
 * Look up the pool slot tracking a given async request.
 *
 * Linear scan of the engine's max_req slots comparing the stored req
 * pointer; returns the matching slot, or NULL if the request is not
 * (or no longer) tracked by this engine.
 */
static struct qcrypto_req_control *find_req_control_for_areq(
					struct crypto_engine *pce,
					struct crypto_async_request *areq)
{
	struct qcrypto_req_control *pool = pce->preq_pool;
	int n;

	for (n = 0; n < pce->max_req; n++) {
		if (pool[n].req == areq)
			return &pool[n];
	}
	return NULL;
}

/*
 * Attach a freshly allocated slot array to an engine and initialize it.
 *
 * Records the pool on the engine, zeroes the outstanding-request
 * counter, and marks each of the max_req slots free, numbering it and
 * pointing it back at its owning engine.
 */
static void qcrypto_init_req_control(struct crypto_engine *pce,
			struct qcrypto_req_control *pqcrypto_req_control)
{
	int n;

	pce->preq_pool = pqcrypto_req_control;
	atomic_set(&pce->req_count, 0);
	for (n = 0; n < pce->max_req; n++) {
		pqcrypto_req_control[n].index = n;
		pqcrypto_req_control[n].in_use = false;
		pqcrypto_req_control[n].pce = pce;
	}
}

static struct crypto_engine *_qrypto_find_pengine_device(struct crypto_priv *cp,
			 unsigned int device)
@@ -579,11 +648,11 @@ static void qcrypto_bw_reaper_work(struct work_struct *work)
		(active_seq == pengine->last_active_seq)) {

		/* check if engine is stuck */
		if (pengine->req) {
		if (atomic_read(&pengine->req_count) > 0) {
			if (pengine->check_flag)
				dev_warn(&pengine->pdev->dev,
				"The engine appears to be stuck seq %d req %p.\n",
				active_seq, pengine->req);
				"The engine appears to be stuck seq %d.\n",
				active_seq);
			pengine->check_flag = false;
			goto ret;
		}
@@ -1046,6 +1115,7 @@ static int _disp_stats(int id)
			pe->unit,
			pe->err_req
		);
		qce_get_driver_stats(pe->qce);
	}
	spin_unlock_irqrestore(&cp->lock, flags);
	return len;
@@ -1076,6 +1146,8 @@ static void _qcrypto_remove_engine(struct crypto_engine *pengine)
		msm_bus_scale_unregister_client(pengine->bus_scale_handle);
	pengine->bus_scale_handle = 0;

	kzfree(pengine->preq_pool);

	if (cp->total_units)
		return;

@@ -1270,18 +1342,31 @@ static int _qcrypto_setkey_3des(struct crypto_ablkcipher *cipher, const u8 *key,

static struct crypto_engine *eng_sel_avoid_first(struct crypto_priv *cp)
{
	struct crypto_engine *pe = NULL;
	struct crypto_engine *pe1 = NULL;
	/*
	 * This function need not be spinlock protected when called from
	 * the seq_response workq as it will not have any contentions when all
	 * request processing is stopped.
	 */
	struct crypto_engine *p;
	struct crypto_engine *q = NULL;
	int max_user =  QCRYPTO_BIG_NUMBER;
	int use_cnt;

	if (unlikely(list_empty(&cp->engine_list))) {
		pr_err("%s: no valid ce to schedule\n", __func__);
		return NULL;
	}

	pe = pe1 = list_first_entry(&cp->engine_list, struct crypto_engine,
	p = list_first_entry(&cp->engine_list, struct crypto_engine,
								elist);
	list_for_each_entry_continue(pe, &cp->engine_list, elist) {
		if (pe->req == NULL)
			return pe;
	list_for_each_entry_continue(p, &cp->engine_list, elist) {
		use_cnt = atomic_read(&p->req_count);
		if ((use_cnt < p->max_req) && (use_cnt < max_user)) {
			q = p;
			max_user = use_cnt;
		}
	if (pe1->req == NULL)
		return pe1;
	return NULL;
	}
	return q;
}

static void seq_response(struct work_struct *work)
@@ -1376,29 +1461,27 @@ retry:
	}
}

static void req_done(struct crypto_engine *pengine)
static void req_done(struct qcrypto_req_control *pqcrypto_req_control)
{
	struct crypto_engine *pengine;
	struct crypto_async_request *areq;
	struct crypto_engine *pe;
	struct crypto_priv *cp;
	unsigned long flags;
	struct qcrypto_resp_ctx *arsp;
	int res;
	u32 type = 0;
	void *tfm_ctx = NULL;

	pengine = pqcrypto_req_control->pce;
	cp = pengine->pcp;
	spin_lock_irqsave(&cp->lock, flags);
	areq = pengine->req;
	areq = pqcrypto_req_control->req;
	arsp = pqcrypto_req_control->arsp;
	qcrypto_free_req_control(pengine, pqcrypto_req_control);

	arsp = pengine->arsp;
	res = pengine->res;
	pengine->req = NULL;
	pengine->arsp = NULL;
	if (areq) {
		type = crypto_tfm_alg_type(areq->tfm);
		tfm_ctx = crypto_tfm_ctx(areq->tfm);
		arsp->res = res;
	}
	pe = list_first_entry(&cp->engine_list, struct crypto_engine, elist);
	if (pe == pengine)
@@ -1418,6 +1501,7 @@ static void _qce_ahash_complete(void *cookie, unsigned char *digest,
		unsigned char *authdata, int ret)
{
	struct ahash_request *areq = (struct ahash_request *) cookie;
	struct crypto_async_request *async_req;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(areq->base.tfm);
	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(areq);
@@ -1426,10 +1510,19 @@ static void _qce_ahash_complete(void *cookie, unsigned char *digest,
	uint32_t diglen = crypto_ahash_digestsize(ahash);
	uint32_t *auth32 = (uint32_t *)authdata;
	struct crypto_engine *pengine;
	struct qcrypto_req_control *pqcrypto_req_control;

	async_req = &areq->base;
	pstat = &_qcrypto_stat;

	pengine = rctx->pengine;
	pqcrypto_req_control = find_req_control_for_areq(pengine,
							 async_req);
	if (pqcrypto_req_control == NULL) {
		pr_err("async request not found\n");
		return;
	}

#ifdef QCRYPTO_DEBUG
	dev_info(&pengine->pdev->dev, "_qce_ahash_complete: %p ret %d\n",
				areq, ret);
@@ -1451,33 +1544,43 @@ static void _qce_ahash_complete(void *cookie, unsigned char *digest,
	rctx->first_blk = 0;

	if (ret) {
		pengine->res = -ENXIO;
		pqcrypto_req_control->arsp->res = -ENXIO;
		pstat->ahash_op_fail++;
	} else {
		pengine->res = 0;
		pqcrypto_req_control->arsp->res = 0;
		pstat->ahash_op_success++;
	}
	if (cp->ce_support.aligned_only)  {
		areq->src = rctx->orig_src;
		kfree(rctx->data);
	}
	req_done(pengine);
	req_done(pqcrypto_req_control);
};

static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb,
		unsigned char *iv, int ret)
{
	struct ablkcipher_request *areq = (struct ablkcipher_request *) cookie;
	struct crypto_async_request *async_req;
	struct crypto_ablkcipher *ablk = crypto_ablkcipher_reqtfm(areq);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;
	struct qcrypto_cipher_req_ctx *rctx;
	struct crypto_engine *pengine;
	struct qcrypto_req_control *pqcrypto_req_control;

	async_req = &areq->base;
	pstat = &_qcrypto_stat;
	rctx = ablkcipher_request_ctx(areq);
	pengine = rctx->pengine;
	pqcrypto_req_control = find_req_control_for_areq(pengine,
							 async_req);
	if (pqcrypto_req_control == NULL) {
		pr_err("async request not found\n");
		return;
	}

#ifdef QCRYPTO_DEBUG
	dev_info(&pengine->pdev->dev, "_qce_ablk_cipher_complete: %p ret %d\n",
				areq, ret);
@@ -1486,10 +1589,10 @@ static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb,
		memcpy(ctx->iv, iv, crypto_ablkcipher_ivsize(ablk));

	if (ret) {
		pengine->res = -ENXIO;
		pqcrypto_req_control->arsp->res = -ENXIO;
		pstat->ablk_cipher_op_fail++;
	} else {
		pengine->res = 0;
		pqcrypto_req_control->arsp->res = 0;
		pstat->ablk_cipher_op_success++;
	}

@@ -1510,7 +1613,7 @@ static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb,
								areq->nbytes);
		kzfree(rctx->data);
	}
	req_done(pengine);
	req_done(pqcrypto_req_control);
};


@@ -1518,16 +1621,25 @@ static void _qce_aead_complete(void *cookie, unsigned char *icv,
				unsigned char *iv, int ret)
{
	struct aead_request *areq = (struct aead_request *) cookie;
	struct crypto_async_request *async_req;
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct qcrypto_cipher_req_ctx *rctx;
	struct crypto_stat *pstat;
	struct crypto_engine *pengine;
	struct qcrypto_req_control *pqcrypto_req_control;

	async_req = &areq->base;
	pstat = &_qcrypto_stat;
	rctx = aead_request_ctx(areq);
	pengine = rctx->pengine;
	pqcrypto_req_control = find_req_control_for_areq(pengine,
							 async_req);
	if (pqcrypto_req_control == NULL) {
		pr_err("async request not found\n");
		return;
	}

	if (rctx->mode == QCE_MODE_CCM) {
		if (cp->ce_support.aligned_only)  {
@@ -1623,8 +1735,8 @@ static void _qce_aead_complete(void *cookie, unsigned char *icv,
	else
		pstat->aead_op_success++;

	pengine->res = ret;
	req_done(pengine);
	pqcrypto_req_control->arsp->res = ret;
	req_done(pqcrypto_req_control);
}

static int aead_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
@@ -1716,8 +1828,9 @@ static int qcrypto_aead_ccm_format_adata(struct qce_req *qreq, uint32_t alen,
}

static int _qcrypto_process_ablkcipher(struct crypto_engine *pengine,
				struct crypto_async_request *async_req)
			struct qcrypto_req_control *pqcrypto_req_control)
{
	struct crypto_async_request *async_req;
	struct qce_req qreq;
	int ret;
	struct qcrypto_cipher_req_ctx *rctx;
@@ -1725,6 +1838,7 @@ static int _qcrypto_process_ablkcipher(struct crypto_engine *pengine,
	struct ablkcipher_request *req;
	struct crypto_ablkcipher *tfm;

	async_req = pqcrypto_req_control->req;
	req = container_of(async_req, struct ablkcipher_request, base);
	cipher_ctx = crypto_tfm_ctx(async_req->tfm);
	rctx = ablkcipher_request_ctx(req);
@@ -1780,14 +1894,16 @@ static int _qcrypto_process_ablkcipher(struct crypto_engine *pengine,
}

static int _qcrypto_process_ahash(struct crypto_engine *pengine,
				struct crypto_async_request *async_req)
			struct qcrypto_req_control *pqcrypto_req_control)
{
	struct crypto_async_request *async_req;
	struct ahash_request *req;
	struct qce_sha_req sreq;
	struct qcrypto_sha_req_ctx *rctx;
	struct qcrypto_sha_ctx *sha_ctx;
	int ret = 0;

	async_req = pqcrypto_req_control->req;
	req = container_of(async_req,
				struct ahash_request, base);
	rctx = ahash_request_ctx(req);
@@ -1837,16 +1953,19 @@ static int _qcrypto_process_ahash(struct crypto_engine *pengine,
}

static int _qcrypto_process_aead(struct  crypto_engine *pengine,
				struct crypto_async_request *async_req)
			struct qcrypto_req_control *pqcrypto_req_control)
{
	struct crypto_async_request *async_req;
	struct qce_req qreq;
	int ret = 0;
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *cipher_ctx;
	struct aead_request *req = container_of(async_req,
				struct aead_request, base);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct aead_request *req;
	struct crypto_aead *aead;

	async_req = pqcrypto_req_control->req;
	req = container_of(async_req, struct aead_request, base);
	aead = crypto_aead_reqtfm(req);
	rctx = aead_request_ctx(req);
	rctx->pengine = pengine;
	cipher_ctx = crypto_tfm_ctx(async_req->tfm);
@@ -2076,12 +2195,13 @@ static int _start_qcrypto_process(struct crypto_priv *cp,
	struct ahash_request *ahash_req;
	struct aead_request *aead_req;
	struct qcrypto_resp_ctx *arsp;
	struct qcrypto_req_control *pqcrypto_req_control;

	pstat = &_qcrypto_stat;

again:
	spin_lock_irqsave(&cp->lock, flags);
	if (pengine->req) {
	if (atomic_read(&pengine->req_count) >= (pengine->max_req)) {
		spin_unlock_irqrestore(&cp->lock, flags);
		return 0;
	}
@@ -2108,6 +2228,12 @@ again:
			return 0;
		}
	}
	pqcrypto_req_control = qcrypto_alloc_req_control(pengine);
	if (pqcrypto_req_control == NULL) {
		pr_err("Allocation of request failed\n");
		spin_unlock_irqrestore(&cp->lock, flags);
		return 0;
	}

	/* add associated rsp entry to tfm response queue */
	type = crypto_tfm_alg_type(async_req->tfm);
@@ -2149,8 +2275,9 @@ again:
	atomic_inc(&cp->resp_cnt);
	arsp->res = -EINPROGRESS;
	arsp->async_req = async_req;
	pengine->req = async_req;
	pengine->arsp = arsp;
	pqcrypto_req_control->pce = pengine;
	pqcrypto_req_control->req = async_req;
	pqcrypto_req_control->arsp = arsp;
	pengine->active_seq++;
	pengine->check_flag = true;

@@ -2161,13 +2288,14 @@ again:
		backlog_cp->complete(backlog_cp, -EINPROGRESS);
	switch (type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		ret = _qcrypto_process_ablkcipher(pengine, async_req);
		ret = _qcrypto_process_ablkcipher(pengine,
					pqcrypto_req_control);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		ret = _qcrypto_process_ahash(pengine, async_req);
		ret = _qcrypto_process_ahash(pengine, pqcrypto_req_control);
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		ret = _qcrypto_process_aead(pengine, async_req);
		ret = _qcrypto_process_aead(pengine, pqcrypto_req_control);
		break;
	default:
		ret = -EINVAL;
@@ -2176,10 +2304,7 @@ again:
	if (ret) {
		arsp->res = ret;
		pengine->err_req++;
		spin_lock_irqsave(&cp->lock, flags);
		pengine->req = NULL;
		pengine->arsp = NULL;
		spin_unlock_irqrestore(&cp->lock, flags);
		qcrypto_free_req_control(pengine, pqcrypto_req_control);

		if (type == CRYPTO_ALG_TYPE_ABLKCIPHER)
			pstat->ablk_cipher_op_fail++;
@@ -2197,15 +2322,27 @@ again:

static struct crypto_engine *_avail_eng(struct crypto_priv *cp)
{
	struct crypto_engine *pe = NULL;
	/* call this function with spinlock set */
	struct crypto_engine *p;
	struct crypto_engine *q = NULL;
	int max_user =  QCRYPTO_BIG_NUMBER;
	int use_cnt;

	list_for_each_entry(pe, &cp->engine_list, elist) {
		if (pe->req == NULL)
			return pe;
	}
	if (unlikely(list_empty(&cp->engine_list))) {
		pr_err("%s: no valid ce to schedule\n", __func__);
		return NULL;
	}

	list_for_each_entry(p, &cp->engine_list, elist) {
		use_cnt = atomic_read(&p->req_count);
		if ((use_cnt < p->max_req) && (use_cnt < max_user)) {
			q = p;
			max_user = use_cnt;
		}
	}
	return q;
}

static int _qcrypto_queue_req(struct crypto_priv *cp,
				struct crypto_engine *pengine,
				struct crypto_async_request *req)
@@ -4571,6 +4708,7 @@ static int _qcrypto_probe(struct platform_device *pdev)
	struct msm_ce_hw_support *platform_support;
	struct crypto_engine *pengine;
	unsigned long flags;
	struct qcrypto_req_control *pqcrypto_req_control = NULL;

	/* For FIPS140-2 Power on self tests */
	struct fips_selftest_data selftest_d;
@@ -4595,7 +4733,6 @@ static int _qcrypto_probe(struct platform_device *pdev)
	pengine->qce = handle;
	pengine->pcp = cp;
	pengine->pdev = pdev;
	pengine->req = NULL;
	pengine->signature = 0xdeadbeef;

	init_timer(&(pengine->bw_reaper_timer));
@@ -4621,6 +4758,14 @@ static int _qcrypto_probe(struct platform_device *pdev)

	qce_hw_support(pengine->qce, &cp->ce_support);
	pengine->ce_hw_instance = cp->ce_support.ce_hw_instance;
	pengine->max_req = cp->ce_support.max_request;
	pqcrypto_req_control = kzalloc(sizeof(struct qcrypto_req_control) *
			pengine->max_req, GFP_KERNEL);
	if (pqcrypto_req_control == NULL) {
		rc = -ENOMEM;
		goto err;
	}
	qcrypto_init_req_control(pengine, pqcrypto_req_control);
	if (cp->ce_support.bam)	 {
		cp->platform_support.ce_shared = cp->ce_support.is_shared;
		cp->platform_support.shared_ce_resource = 0;
@@ -4993,6 +5138,8 @@ err:
	mutex_unlock(&cp->engine_lock);
	if (pengine->qce)
		qce_close(pengine->qce);
	if (pqcrypto_req_control)
		kzfree(pqcrypto_req_control);
	kzfree(pengine);
	return rc;
};
@@ -5001,7 +5148,8 @@ static int _qcrypto_engine_in_use(struct crypto_engine *pengine)
{
	struct crypto_priv *cp = pengine->pcp;

	if (pengine->req || pengine->req_queue.qlen || cp->req_queue.qlen)
	if ((atomic_read(&pengine->req_count) > 0) || pengine->req_queue.qlen
					|| cp->req_queue.qlen)
		return 1;
	return 0;
}
@@ -5162,6 +5310,7 @@ static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
	list_for_each_entry(pe, &cp->engine_list, elist) {
		pe->total_req = 0;
		pe->err_req = 0;
		qce_clear_driver_stats(pe->qce);
	}
	spin_unlock_irqrestore(&cp->lock, flags);
	return count;