
Commit cff9a175 authored by Antoine Tenart, committed by Herbert Xu

crypto: inside-secure - move cache result dma mapping to request



In heavy traffic the DMA mapping is overwritten by multiple requests,
as the DMA address is stored in a context shared by all requests on
the same transform. This patch moves this information to the
per-hash-request context so that it can't be overwritten.

Fixes: 1b44c5a6 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent b8592027
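
The race comes down to where the cache DMA handle lives: struct safexcel_context is shared by every request issued on the same transform, while each mapping is only valid for a single request. Below is a simplified, non-kernel sketch of the layout change; the names and types (the dma_addr_t stand-in, the 64-byte cache, the struct names) are illustrative assumptions, not the driver's actual definitions, which appear in the diff that follows.

#include <stdint.h>

typedef uintptr_t dma_addr_t;	/* userspace stand-in for the kernel type */

/* Before: per-transform state shared by all in-flight requests.
 * Two concurrent requests both store their mapping here, so the
 * second silently overwrites the first before it is unmapped. */
struct shared_tfm_ctx {
	dma_addr_t cache_dma;	/* clobbered under heavy traffic */
	unsigned int cache_sz;
};

/* After: the handle moves into the per-request state, so every
 * request maps, uses and unmaps its own cache independently. */
struct per_hash_request {
	uint8_t cache[64];	/* stands in for the SHA256_BLOCK_SIZE cache */
	dma_addr_t cache_dma;	/* private to this request */
	unsigned int cache_sz;
};
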
drivers/crypto/inside-secure/safexcel.c  +0 −14
@@ -537,20 +537,6 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
 	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
 }
 
-void safexcel_free_context(struct safexcel_crypto_priv *priv,
-			   struct crypto_async_request *req)
-{
-	struct safexcel_context *ctx = crypto_tfm_ctx(req->tfm);
-
-	if (ctx->cache) {
-		dma_unmap_single(priv->dev, ctx->cache_dma, ctx->cache_sz,
-				 DMA_TO_DEVICE);
-		kfree(ctx->cache);
-		ctx->cache = NULL;
-		ctx->cache_sz = 0;
-	}
-}
-
 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
 {
 	struct safexcel_command_desc *cdesc;
drivers/crypto/inside-secure/safexcel.h  +0 −7
@@ -578,11 +578,6 @@ struct safexcel_context {
 	int ring;
 	bool needs_inv;
 	bool exit_inv;
-
-	/* Used for ahash requests */
-	void *cache;
-	dma_addr_t cache_dma;
-	unsigned int cache_sz;
 };
 
 /*
@@ -606,8 +601,6 @@ struct safexcel_inv_result {
 
 void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
-void safexcel_free_context(struct safexcel_crypto_priv *priv,
-				  struct crypto_async_request *req);
 int safexcel_invalidate_cache(struct crypto_async_request *async,
 			      struct safexcel_crypto_priv *priv,
 			      dma_addr_t ctxr_dma, int ring,
drivers/crypto/inside-secure/safexcel_hash.c  +18 −24
@@ -43,6 +43,9 @@ struct safexcel_ahash_req {
 	u64 processed;
 
 	u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+	dma_addr_t cache_dma;
+	unsigned int cache_sz;
+
 	u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
 };
 
@@ -165,7 +168,11 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 		sreq->result_dma = 0;
 	}
 
-	safexcel_free_context(priv, async);
+	if (sreq->cache_dma) {
+		dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
+				 DMA_TO_DEVICE);
+		sreq->cache_dma = 0;
+	}
 
 	cache_len = sreq->len - sreq->processed;
 	if (cache_len)
@@ -227,24 +234,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 
 	/* Add a command descriptor for the cached data, if any */
 	if (cache_len) {
-		ctx->base.cache = kzalloc(cache_len, EIP197_GFP_FLAGS(*async));
-		if (!ctx->base.cache) {
-			ret = -ENOMEM;
-			goto unlock;
-		}
-		memcpy(ctx->base.cache, req->cache, cache_len);
-		ctx->base.cache_dma = dma_map_single(priv->dev, ctx->base.cache,
+		req->cache_dma = dma_map_single(priv->dev, req->cache,
 						cache_len, DMA_TO_DEVICE);
-		if (dma_mapping_error(priv->dev, ctx->base.cache_dma)) {
-			ret = -EINVAL;
-			goto free_cache;
-		}
+		if (dma_mapping_error(priv->dev, req->cache_dma))
+			return -EINVAL;
 
-		ctx->base.cache_sz = cache_len;
+		req->cache_sz = cache_len;
 		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
 						 (cache_len == len),
-						 ctx->base.cache_dma,
-						 cache_len, len,
+						 req->cache_dma, cache_len, len,
 						 ctx->base.ctxr_dma);
 		if (IS_ERR(first_cdesc)) {
 			ret = PTR_ERR(first_cdesc);
@@ -328,16 +326,12 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 	for (i = 0; i < n_cdesc; i++)
 		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
 unmap_cache:
-	if (ctx->base.cache_dma) {
-		dma_unmap_single(priv->dev, ctx->base.cache_dma,
-				 ctx->base.cache_sz, DMA_TO_DEVICE);
-		ctx->base.cache_sz = 0;
+	if (req->cache_dma) {
+		dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
+				 DMA_TO_DEVICE);
+		req->cache_sz = 0;
 	}
-free_cache:
-	kfree(ctx->base.cache);
-	ctx->base.cache = NULL;
-
 unlock:
 	spin_unlock_bh(&priv->ring[ring].egress_lock);
 	return ret;
 }