
Commit 5a7801f6 authored by Zain Wang, committed by Herbert Xu

crypto: rockchip - Don't dequeue the request when device is busy

The device can only process one request at a time. So if multiple
requests arrive at the same time, we enqueue them first, then dequeue
and process them one by one as the device becomes idle.

Signed-off-by: zain wang <wzz@rock-chips.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent baf5b752
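
The heart of the change is in the first file below: a software queue plus a dev->busy flag, both guarded by the device spinlock, decide whether an incoming request merely joins the queue or also kicks the dequeue tasklet. Here is a condensed sketch of that hand-off, lifted from the hunks below with explanatory comments added (backlog completion and the dev->err bookkeeping are omitted; see the full diff):

	static int rk_crypto_enqueue(struct rk_crypto_info *dev,
				      struct crypto_async_request *async_req)
	{
		unsigned long flags;
		int ret;

		spin_lock_irqsave(&dev->lock, flags);
		ret = crypto_enqueue_request(&dev->queue, async_req);
		if (dev->busy) {
			/* The tasklet is already draining the queue;
			 * the request just waits its turn. */
			spin_unlock_irqrestore(&dev->lock, flags);
			return ret;
		}
		/* Device is idle: claim it, then kick the dequeue tasklet. */
		dev->busy = true;
		spin_unlock_irqrestore(&dev->lock, flags);
		tasklet_schedule(&dev->queue_task);
		return ret;
	}

	static void rk_crypto_queue_task_cb(unsigned long data)
	{
		struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
		struct crypto_async_request *async_req;
		unsigned long flags;
		int err;

		spin_lock_irqsave(&dev->lock, flags);
		async_req = crypto_dequeue_request(&dev->queue);
		if (!async_req) {
			/* Queue drained: release the claim so the next
			 * enqueue reschedules this tasklet. */
			dev->busy = false;
			spin_unlock_irqrestore(&dev->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&dev->lock, flags);

		/* Hand the request to the hardware; on failure complete
		 * it immediately with the error. */
		dev->async_req = async_req;
		err = dev->start(dev);
		if (err)
			dev->complete(dev->async_req, err);
	}

Because busy is cleared only from the tasklet, under the same lock that guards the enqueue, a caller that observes busy == true is guaranteed the tasklet will run again and find its request.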
+43 −3
@@ -184,15 +184,53 @@ static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
	return IRQ_HANDLED;
}

+static int rk_crypto_enqueue(struct rk_crypto_info *dev,
+			      struct crypto_async_request *async_req)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	ret = crypto_enqueue_request(&dev->queue, async_req);
+	if (dev->busy) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return ret;
+	}
+	dev->busy = true;
+	spin_unlock_irqrestore(&dev->lock, flags);
+	tasklet_schedule(&dev->queue_task);
+
+	return ret;
+}
+
static void rk_crypto_queue_task_cb(unsigned long data)
{
	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
+	struct crypto_async_request *async_req, *backlog;
+	unsigned long flags;
	int err = 0;

	dev->err = 0;
+	spin_lock_irqsave(&dev->lock, flags);
+	backlog   = crypto_get_backlog(&dev->queue);
+	async_req = crypto_dequeue_request(&dev->queue);
+
+	if (!async_req) {
+		dev->busy = false;
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (backlog) {
+		backlog->complete(backlog, -EINPROGRESS);
+		backlog = NULL;
+	}
+
+	dev->async_req = async_req;
	err = dev->start(dev);
	if (err)
-		dev->complete(dev, err);
+		dev->complete(dev->async_req, err);
}

static void rk_crypto_done_task_cb(unsigned long data)
@@ -200,13 +238,13 @@ static void rk_crypto_done_task_cb(unsigned long data)
	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;

	if (dev->err) {
-		dev->complete(dev, dev->err);
+		dev->complete(dev->async_req, dev->err);
		return;
	}

	dev->err = dev->update(dev);
	if (dev->err)
-		dev->complete(dev, dev->err);
+		dev->complete(dev->async_req, dev->err);
}

static struct rk_crypto_tmp *rk_cipher_algs[] = {
@@ -365,6 +403,8 @@ static int rk_crypto_probe(struct platform_device *pdev)
	crypto_info->disable_clk = rk_crypto_disable_clk;
	crypto_info->load_data = rk_load_data;
	crypto_info->unload_data = rk_unload_data;
+	crypto_info->enqueue = rk_crypto_enqueue;
+	crypto_info->busy = false;

	err = rk_crypto_register(crypto_info);
	if (err) {
+7 −4
@@ -192,8 +192,7 @@ struct rk_crypto_info {
	struct crypto_queue		queue;
	struct tasklet_struct		queue_task;
	struct tasklet_struct		done_task;
-	struct ablkcipher_request	*ablk_req;
-	struct ahash_request		*ahash_req;
+	struct crypto_async_request	*async_req;
	int 				err;
	/* device lock */
	spinlock_t			lock;
@@ -210,18 +209,20 @@ struct rk_crypto_info {
	size_t				nents;
	unsigned int			total;
	unsigned int			count;
-	u32				mode;
	dma_addr_t			addr_in;
	dma_addr_t			addr_out;
+	bool				busy;
	int (*start)(struct rk_crypto_info *dev);
	int (*update)(struct rk_crypto_info *dev);
-	void (*complete)(struct rk_crypto_info *dev, int err);
+	void (*complete)(struct crypto_async_request *base, int err);
	int (*enable_clk)(struct rk_crypto_info *dev);
	void (*disable_clk)(struct rk_crypto_info *dev);
	int (*load_data)(struct rk_crypto_info *dev,
			 struct scatterlist *sg_src,
			 struct scatterlist *sg_dst);
	void (*unload_data)(struct rk_crypto_info *dev);
+	int (*enqueue)(struct rk_crypto_info *dev,
+		       struct crypto_async_request *async_req);
};

/* the private variable of hash */
@@ -234,12 +235,14 @@ struct rk_ahash_ctx {
/* the privete variable of hash for fallback */
struct rk_ahash_rctx {
	struct ahash_request		fallback_req;
+	u32				mode;
};

/* the private variable of cipher */
struct rk_cipher_ctx {
	struct rk_crypto_info		*dev;
	unsigned int			keylen;
+	u32				mode;
};

enum alg_type {
+50 −68
@@ -15,50 +15,19 @@

#define RK_CRYPTO_DEC			BIT(0)

-static void rk_crypto_complete(struct rk_crypto_info *dev, int err)
+static void rk_crypto_complete(struct crypto_async_request *base, int err)
{
-	if (dev->ablk_req->base.complete)
-		dev->ablk_req->base.complete(&dev->ablk_req->base, err);
+	if (base->complete)
+		base->complete(base, err);
}

static int rk_handle_req(struct rk_crypto_info *dev,
			 struct ablkcipher_request *req)
{
-	unsigned long flags;
-	struct crypto_async_request *async_req, *backlog;
-	int err;
-
	if (!IS_ALIGNED(req->nbytes, dev->align_size))
		return -EINVAL;
-
-	dev->left_bytes = req->nbytes;
-	dev->total = req->nbytes;
-	dev->sg_src = req->src;
-	dev->first = req->src;
-	dev->nents = sg_nents(req->src);
-	dev->sg_dst = req->dst;
-	dev->aligned = 1;
-	dev->ablk_req = req;
-
-	spin_lock_irqsave(&dev->lock, flags);
-	err = ablkcipher_enqueue_request(&dev->queue, req);
-	backlog   = crypto_get_backlog(&dev->queue);
-	async_req = crypto_dequeue_request(&dev->queue);
-	spin_unlock_irqrestore(&dev->lock, flags);
-
-	if (!async_req) {
-		dev_err(dev->dev, "async_req is NULL !!\n");
-		return err;
-	}
-	if (backlog) {
-		backlog->complete(backlog, -EINPROGRESS);
-		backlog = NULL;
-	}
-
-	dev->ablk_req = ablkcipher_request_cast(async_req);
-
-	tasklet_schedule(&dev->queue_task);
-	return err;
+	else
+		return dev->enqueue(dev, &req->base);
}

static int rk_aes_setkey(struct crypto_ablkcipher *cipher,
@@ -108,7 +77,7 @@ static int rk_aes_ecb_encrypt(struct ablkcipher_request *req)
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

-	dev->mode = RK_CRYPTO_AES_ECB_MODE;
+	ctx->mode = RK_CRYPTO_AES_ECB_MODE;
	return rk_handle_req(dev, req);
}

@@ -118,7 +87,7 @@ static int rk_aes_ecb_decrypt(struct ablkcipher_request *req)
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

-	dev->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
+	ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

@@ -128,7 +97,7 @@ static int rk_aes_cbc_encrypt(struct ablkcipher_request *req)
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

-	dev->mode = RK_CRYPTO_AES_CBC_MODE;
+	ctx->mode = RK_CRYPTO_AES_CBC_MODE;
	return rk_handle_req(dev, req);
}

@@ -138,7 +107,7 @@ static int rk_aes_cbc_decrypt(struct ablkcipher_request *req)
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

-	dev->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
+	ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

@@ -148,7 +117,7 @@ static int rk_des_ecb_encrypt(struct ablkcipher_request *req)
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

-	dev->mode = 0;
+	ctx->mode = 0;
	return rk_handle_req(dev, req);
}

@@ -158,7 +127,7 @@ static int rk_des_ecb_decrypt(struct ablkcipher_request *req)
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

-	dev->mode = RK_CRYPTO_DEC;
+	ctx->mode = RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

@@ -168,7 +137,7 @@ static int rk_des_cbc_encrypt(struct ablkcipher_request *req)
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

-	dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
+	ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
	return rk_handle_req(dev, req);
}

@@ -178,7 +147,7 @@ static int rk_des_cbc_decrypt(struct ablkcipher_request *req)
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

-	dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
+	ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

@@ -188,7 +157,7 @@ static int rk_des3_ede_ecb_encrypt(struct ablkcipher_request *req)
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

-	dev->mode = RK_CRYPTO_TDES_SELECT;
+	ctx->mode = RK_CRYPTO_TDES_SELECT;
	return rk_handle_req(dev, req);
}

@@ -198,7 +167,7 @@ static int rk_des3_ede_ecb_decrypt(struct ablkcipher_request *req)
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

-	dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
+	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

@@ -208,7 +177,7 @@ static int rk_des3_ede_cbc_encrypt(struct ablkcipher_request *req)
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

-	dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
+	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
	return rk_handle_req(dev, req);
}

@@ -218,15 +187,16 @@ static int rk_des3_ede_cbc_decrypt(struct ablkcipher_request *req)
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

-	dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
+	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
		    RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static void rk_ablk_hw_init(struct rk_crypto_info *dev)
{
-	struct crypto_ablkcipher *cipher =
-		crypto_ablkcipher_reqtfm(dev->ablk_req);
+	struct ablkcipher_request *req =
+		ablkcipher_request_cast(dev->async_req);
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	u32 ivsize, block, conf_reg = 0;
@@ -235,25 +205,23 @@ static void rk_ablk_hw_init(struct rk_crypto_info *dev)
	ivsize = crypto_ablkcipher_ivsize(cipher);

	if (block == DES_BLOCK_SIZE) {
-		dev->mode |= RK_CRYPTO_TDES_FIFO_MODE |
+		ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
			     RK_CRYPTO_TDES_BYTESWAP_KEY |
			     RK_CRYPTO_TDES_BYTESWAP_IV;
-		CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, dev->mode);
-		memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0,
-			    dev->ablk_req->info, ivsize);
+		CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
+		memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->info, ivsize);
		conf_reg = RK_CRYPTO_DESSEL;
	} else {
-		dev->mode |= RK_CRYPTO_AES_FIFO_MODE |
+		ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
			     RK_CRYPTO_AES_KEY_CHANGE |
			     RK_CRYPTO_AES_BYTESWAP_KEY |
			     RK_CRYPTO_AES_BYTESWAP_IV;
		if (ctx->keylen == AES_KEYSIZE_192)
-			dev->mode |= RK_CRYPTO_AES_192BIT_key;
+			ctx->mode |= RK_CRYPTO_AES_192BIT_key;
		else if (ctx->keylen == AES_KEYSIZE_256)
-			dev->mode |= RK_CRYPTO_AES_256BIT_key;
-		CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, dev->mode);
-		memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0,
-			    dev->ablk_req->info, ivsize);
+			ctx->mode |= RK_CRYPTO_AES_256BIT_key;
+		CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
+		memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->info, ivsize);
	}
	conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
		    RK_CRYPTO_BYTESWAP_BRFIFO;
@@ -283,8 +251,18 @@ static int rk_set_data_start(struct rk_crypto_info *dev)

static int rk_ablk_start(struct rk_crypto_info *dev)
{
+	struct ablkcipher_request *req =
+		ablkcipher_request_cast(dev->async_req);
	unsigned long flags;
-	int err;
+	int err = 0;

+	dev->left_bytes = req->nbytes;
+	dev->total = req->nbytes;
+	dev->sg_src = req->src;
+	dev->first = req->src;
+	dev->nents = sg_nents(req->src);
+	dev->sg_dst = req->dst;
+	dev->aligned = 1;
+
	spin_lock_irqsave(&dev->lock, flags);
	rk_ablk_hw_init(dev);
@@ -295,15 +273,16 @@ static int rk_ablk_start(struct rk_crypto_info *dev)

static void rk_iv_copyback(struct rk_crypto_info *dev)
{
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(dev->ablk_req);
+	struct ablkcipher_request *req =
+		ablkcipher_request_cast(dev->async_req);
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	u32 ivsize = crypto_ablkcipher_ivsize(tfm);

	if (ivsize == DES_BLOCK_SIZE)
-		memcpy_fromio(dev->ablk_req->info,
-			      dev->reg + RK_CRYPTO_TDES_IV_0, ivsize);
+		memcpy_fromio(req->info, dev->reg + RK_CRYPTO_TDES_IV_0,
+			      ivsize);
	else if (ivsize == AES_BLOCK_SIZE)
-		memcpy_fromio(dev->ablk_req->info,
-			      dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
+		memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
}

/* return:
@@ -313,10 +292,12 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
static int rk_ablk_rx(struct rk_crypto_info *dev)
{
	int err = 0;
+	struct ablkcipher_request *req =
+		ablkcipher_request_cast(dev->async_req);

	dev->unload_data(dev);
	if (!dev->aligned) {
-		if (!sg_pcopy_from_buffer(dev->ablk_req->dst, dev->nents,
+		if (!sg_pcopy_from_buffer(req->dst, dev->nents,
					  dev->addr_vir, dev->count,
					  dev->total - dev->left_bytes -
					  dev->count)) {
@@ -339,7 +320,8 @@ static int rk_ablk_rx(struct rk_crypto_info *dev)
	} else {
		rk_iv_copyback(dev);
		/* here show the calculation is over without any err */
-		dev->complete(dev, 0);
+		dev->complete(dev->async_req, 0);
+		tasklet_schedule(&dev->queue_task);
	}
out_rx:
	return err;
+60 −73
@@ -40,14 +40,16 @@ static int zero_message_process(struct ahash_request *req)
	return 0;
}

-static void rk_ahash_crypto_complete(struct rk_crypto_info *dev, int err)
+static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err)
{
-	if (dev->ahash_req->base.complete)
-		dev->ahash_req->base.complete(&dev->ahash_req->base, err);
+	if (base->complete)
+		base->complete(base, err);
}

static void rk_ahash_reg_init(struct rk_crypto_info *dev)
{
+	struct ahash_request *req = ahash_request_cast(dev->async_req);
+	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	int reg_status = 0;

	reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
@@ -67,7 +69,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev)
	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
					    RK_CRYPTO_HRDMA_DONE_INT);

-	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, dev->mode |
+	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, rctx->mode |
					       RK_CRYPTO_HASH_SWAP_DO);

	CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
@@ -164,78 +166,13 @@ static int rk_ahash_export(struct ahash_request *req, void *out)

static int rk_ahash_digest(struct ahash_request *req)
{
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
-	struct crypto_async_request *async_req, *backlog;
-	struct rk_crypto_info *dev = NULL;
-	unsigned long flags;
-	int ret;
+	struct rk_crypto_info *dev = tctx->dev;

	if (!req->nbytes)
		return zero_message_process(req);
-
-	dev = tctx->dev;
-	dev->total = req->nbytes;
-	dev->left_bytes = req->nbytes;
-	dev->aligned = 0;
-	dev->mode = 0;
-	dev->align_size = 4;
-	dev->sg_dst = NULL;
-	dev->sg_src = req->src;
-	dev->first = req->src;
-	dev->nents = sg_nents(req->src);
-
-	switch (crypto_ahash_digestsize(tfm)) {
-	case SHA1_DIGEST_SIZE:
-		dev->mode = RK_CRYPTO_HASH_SHA1;
-		break;
-	case SHA256_DIGEST_SIZE:
-		dev->mode = RK_CRYPTO_HASH_SHA256;
-		break;
-	case MD5_DIGEST_SIZE:
-		dev->mode = RK_CRYPTO_HASH_MD5;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	rk_ahash_reg_init(dev);
-
-	spin_lock_irqsave(&dev->lock, flags);
-	ret = crypto_enqueue_request(&dev->queue, &req->base);
-	backlog   = crypto_get_backlog(&dev->queue);
-	async_req = crypto_dequeue_request(&dev->queue);
-	spin_unlock_irqrestore(&dev->lock, flags);
-
-	if (!async_req) {
-		dev_err(dev->dev, "async_req is NULL !!\n");
-		return ret;
-	}
-	if (backlog) {
-		backlog->complete(backlog, -EINPROGRESS);
-		backlog = NULL;
-	}
-
-	dev->ahash_req = ahash_request_cast(async_req);
-
-	tasklet_schedule(&dev->queue_task);
-
-	/*
-	 * it will take some time to process date after last dma transmission.
-	 *
-	 * waiting time is relative with the last date len,
-	 * so cannot set a fixed time here.
-	 * 10-50 makes system not call here frequently wasting
-	 * efficiency, and make it response quickly when dma
-	 * complete.
-	 */
-	while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
-		usleep_range(10, 50);
-
-	memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
-		      crypto_ahash_digestsize(tfm));
-
-	return 0;
+	else
+		return dev->enqueue(dev, &req->base);
}

static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
@@ -258,12 +195,45 @@ static int rk_ahash_set_data_start(struct rk_crypto_info *dev)

static int rk_ahash_start(struct rk_crypto_info *dev)
{
+	struct ahash_request *req = ahash_request_cast(dev->async_req);
+	struct crypto_ahash *tfm;
+	struct rk_ahash_rctx *rctx;
+
+	dev->total = req->nbytes;
+	dev->left_bytes = req->nbytes;
+	dev->aligned = 0;
+	dev->align_size = 4;
+	dev->sg_dst = NULL;
+	dev->sg_src = req->src;
+	dev->first = req->src;
+	dev->nents = sg_nents(req->src);
+	rctx = ahash_request_ctx(req);
+	rctx->mode = 0;
+
+	tfm = crypto_ahash_reqtfm(req);
+	switch (crypto_ahash_digestsize(tfm)) {
+	case SHA1_DIGEST_SIZE:
+		rctx->mode = RK_CRYPTO_HASH_SHA1;
+		break;
+	case SHA256_DIGEST_SIZE:
+		rctx->mode = RK_CRYPTO_HASH_SHA256;
+		break;
+	case MD5_DIGEST_SIZE:
+		rctx->mode = RK_CRYPTO_HASH_MD5;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	rk_ahash_reg_init(dev);
	return rk_ahash_set_data_start(dev);
}

static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
{
	int err = 0;
+	struct ahash_request *req = ahash_request_cast(dev->async_req);
+	struct crypto_ahash *tfm;

	dev->unload_data(dev);
	if (dev->left_bytes) {
@@ -278,7 +248,24 @@ static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
		}
		err = rk_ahash_set_data_start(dev);
	} else {
-		dev->complete(dev, 0);
+		/*
+		 * it will take some time to process date after last dma
+		 * transmission.
+		 *
+		 * waiting time is relative with the last date len,
+		 * so cannot set a fixed time here.
+		 * 10us makes system not call here frequently wasting
+		 * efficiency, and make it response quickly when dma
+		 * complete.
+		 */
+		while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
+			udelay(10);
+
+		tfm = crypto_ahash_reqtfm(req);
+		memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
+			      crypto_ahash_digestsize(tfm));
+		dev->complete(dev->async_req, 0);
+		tasklet_schedule(&dev->queue_task);
	}

out_rx: