Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c459bd7b authored by Tim Chen, committed by Herbert Xu
Browse files

crypto: sha512-mb - Protect sha512 mb ctx mgr access



The flusher and the regular multi-buffer computation via mcryptd may race with one another.
Add a lock and disable interrupts to protect access to the multi-buffer
computation state cstate->mgr before a round of computation. This should
prevent the flusher code from jumping in.

Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 5d3d9c8b
Loading
Loading
Loading
Loading
+42 −22
Original line number Original line Diff line number Diff line
@@ -221,7 +221,7 @@ static struct sha512_hash_ctx *sha512_ctx_mgr_resubmit
}
}


static struct sha512_hash_ctx
static struct sha512_hash_ctx
		*sha512_ctx_mgr_get_comp_ctx(struct sha512_ctx_mgr *mgr)
		*sha512_ctx_mgr_get_comp_ctx(struct mcryptd_alg_cstate *cstate)
{
{
	/*
	/*
	 * If get_comp_job returns NULL, there are no jobs complete.
	 * If get_comp_job returns NULL, there are no jobs complete.
@@ -233,11 +233,17 @@ static struct sha512_hash_ctx
	 * Otherwise, all jobs currently being managed by the hash_ctx_mgr
	 * Otherwise, all jobs currently being managed by the hash_ctx_mgr
	 * still need processing.
	 * still need processing.
	 */
	 */
	struct sha512_ctx_mgr *mgr;
	struct sha512_hash_ctx *ctx;
	struct sha512_hash_ctx *ctx;
	unsigned long flags;


	mgr = cstate->mgr;
	spin_lock_irqsave(&cstate->work_lock, flags);
	ctx = (struct sha512_hash_ctx *)
	ctx = (struct sha512_hash_ctx *)
				sha512_job_mgr_get_comp_job(&mgr->mgr);
				sha512_job_mgr_get_comp_job(&mgr->mgr);
	return sha512_ctx_mgr_resubmit(mgr, ctx);
	ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
	spin_unlock_irqrestore(&cstate->work_lock, flags);
	return ctx;
}
}


static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr)
static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr)
@@ -246,12 +252,17 @@ static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr)
}
}


static struct sha512_hash_ctx
static struct sha512_hash_ctx
			*sha512_ctx_mgr_submit(struct sha512_ctx_mgr *mgr,
			*sha512_ctx_mgr_submit(struct mcryptd_alg_cstate *cstate,
					  struct sha512_hash_ctx *ctx,
					  struct sha512_hash_ctx *ctx,
					  const void *buffer,
					  const void *buffer,
					  uint32_t len,
					  uint32_t len,
					  int flags)
					  int flags)
{
{
	struct sha512_ctx_mgr *mgr;
	unsigned long irqflags;

	mgr = cstate->mgr;
	spin_lock_irqsave(&cstate->work_lock, irqflags);
	if (flags & (~HASH_ENTIRE)) {
	if (flags & (~HASH_ENTIRE)) {
		/*
		/*
		 * User should not pass anything other than FIRST, UPDATE, or
		 * User should not pass anything other than FIRST, UPDATE, or
@@ -351,20 +362,26 @@ static struct sha512_hash_ctx
		}
		}
	}
	}


	return sha512_ctx_mgr_resubmit(mgr, ctx);
	ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
	spin_unlock_irqrestore(&cstate->work_lock, irqflags);
	return ctx;
}
}


static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct sha512_ctx_mgr *mgr)
static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct mcryptd_alg_cstate *cstate)
{
{
	struct sha512_ctx_mgr *mgr;
	struct sha512_hash_ctx *ctx;
	struct sha512_hash_ctx *ctx;
	unsigned long flags;


	mgr = cstate->mgr;
	spin_lock_irqsave(&cstate->work_lock, flags);
	while (1) {
	while (1) {
		ctx = (struct sha512_hash_ctx *)
		ctx = (struct sha512_hash_ctx *)
					sha512_job_mgr_flush(&mgr->mgr);
					sha512_job_mgr_flush(&mgr->mgr);


		/* If flush returned 0, there are no more jobs in flight. */
		/* If flush returned 0, there are no more jobs in flight. */
		if (!ctx)
		if (!ctx)
			return NULL;
			break;


		/*
		/*
		 * If flush returned a job, resubmit the job to finish
		 * If flush returned a job, resubmit the job to finish
@@ -378,8 +395,10 @@ static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct sha512_ctx_mgr *mgr)
		 * the sha512_ctx_mgr still need processing. Loop.
		 * the sha512_ctx_mgr still need processing. Loop.
		 */
		 */
		if (ctx)
		if (ctx)
			return ctx;
			break;
	}
	}
	spin_unlock_irqrestore(&cstate->work_lock, flags);
	return ctx;
}
}


static int sha512_mb_init(struct ahash_request *areq)
static int sha512_mb_init(struct ahash_request *areq)
@@ -439,11 +458,11 @@ static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
		sha_ctx = (struct sha512_hash_ctx *)
		sha_ctx = (struct sha512_hash_ctx *)
						ahash_request_ctx(&rctx->areq);
						ahash_request_ctx(&rctx->areq);
		kernel_fpu_begin();
		kernel_fpu_begin();
		sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx,
		sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx,
						rctx->walk.data, nbytes, flag);
						rctx->walk.data, nbytes, flag);
		if (!sha_ctx) {
		if (!sha_ctx) {
			if (flush)
			if (flush)
				sha_ctx = sha512_ctx_mgr_flush(cstate->mgr);
				sha_ctx = sha512_ctx_mgr_flush(cstate);
		}
		}
		kernel_fpu_end();
		kernel_fpu_end();
		if (sha_ctx)
		if (sha_ctx)
@@ -471,11 +490,12 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
	struct sha512_hash_ctx *sha_ctx;
	struct sha512_hash_ctx *sha_ctx;
	struct mcryptd_hash_request_ctx *req_ctx;
	struct mcryptd_hash_request_ctx *req_ctx;
	int ret;
	int ret;
	unsigned long flags;


	/* remove from work list */
	/* remove from work list */
	spin_lock(&cstate->work_lock);
	spin_lock_irqsave(&cstate->work_lock, flags);
	list_del(&rctx->waiter);
	list_del(&rctx->waiter);
	spin_unlock(&cstate->work_lock);
	spin_unlock_irqrestore(&cstate->work_lock, flags);


	if (irqs_disabled())
	if (irqs_disabled())
		rctx->complete(&req->base, err);
		rctx->complete(&req->base, err);
@@ -486,14 +506,14 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
	}
	}


	/* check to see if there are other jobs that are done */
	/* check to see if there are other jobs that are done */
	sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate->mgr);
	sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
	while (sha_ctx) {
	while (sha_ctx) {
		req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		ret = sha_finish_walk(&req_ctx, cstate, false);
		ret = sha_finish_walk(&req_ctx, cstate, false);
		if (req_ctx) {
		if (req_ctx) {
			spin_lock(&cstate->work_lock);
			spin_lock_irqsave(&cstate->work_lock, flags);
			list_del(&req_ctx->waiter);
			list_del(&req_ctx->waiter);
			spin_unlock(&cstate->work_lock);
			spin_unlock_irqrestore(&cstate->work_lock, flags);


			req = cast_mcryptd_ctx_to_req(req_ctx);
			req = cast_mcryptd_ctx_to_req(req_ctx);
			if (irqs_disabled())
			if (irqs_disabled())
@@ -504,7 +524,7 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
				local_bh_enable();
				local_bh_enable();
			}
			}
		}
		}
		sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate->mgr);
		sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
	}
	}


	return 0;
	return 0;
@@ -515,6 +535,7 @@ static void sha512_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
{
{
	unsigned long next_flush;
	unsigned long next_flush;
	unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
	unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
	unsigned long flags;


	/* initialize tag */
	/* initialize tag */
	rctx->tag.arrival = jiffies;    /* tag the arrival time */
	rctx->tag.arrival = jiffies;    /* tag the arrival time */
@@ -522,9 +543,9 @@ static void sha512_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
	next_flush = rctx->tag.arrival + delay;
	next_flush = rctx->tag.arrival + delay;
	rctx->tag.expire = next_flush;
	rctx->tag.expire = next_flush;


	spin_lock(&cstate->work_lock);
	spin_lock_irqsave(&cstate->work_lock, flags);
	list_add_tail(&rctx->waiter, &cstate->work_list);
	list_add_tail(&rctx->waiter, &cstate->work_list);
	spin_unlock(&cstate->work_lock);
	spin_unlock_irqrestore(&cstate->work_lock, flags);


	mcryptd_arm_flusher(cstate, delay);
	mcryptd_arm_flusher(cstate, delay);
}
}
@@ -565,7 +586,7 @@ static int sha512_mb_update(struct ahash_request *areq)
	sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
	sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
	sha512_mb_add_list(rctx, cstate);
	sha512_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	kernel_fpu_begin();
	sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
	sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
							nbytes, HASH_UPDATE);
							nbytes, HASH_UPDATE);
	kernel_fpu_end();
	kernel_fpu_end();


@@ -628,7 +649,7 @@ static int sha512_mb_finup(struct ahash_request *areq)
	sha512_mb_add_list(rctx, cstate);
	sha512_mb_add_list(rctx, cstate);


	kernel_fpu_begin();
	kernel_fpu_begin();
	sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
	sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
								nbytes, flag);
								nbytes, flag);
	kernel_fpu_end();
	kernel_fpu_end();


@@ -677,8 +698,7 @@ static int sha512_mb_final(struct ahash_request *areq)
	/* flag HASH_FINAL and 0 data size */
	/* flag HASH_FINAL and 0 data size */
	sha512_mb_add_list(rctx, cstate);
	sha512_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	kernel_fpu_begin();
	sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
	sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, &data, 0, HASH_LAST);
								HASH_LAST);
	kernel_fpu_end();
	kernel_fpu_end();


	/* check if anything is returned */
	/* check if anything is returned */
@@ -940,7 +960,7 @@ static unsigned long sha512_mb_flusher(struct mcryptd_alg_cstate *cstate)
			break;
			break;
		kernel_fpu_begin();
		kernel_fpu_begin();
		sha_ctx = (struct sha512_hash_ctx *)
		sha_ctx = (struct sha512_hash_ctx *)
					sha512_ctx_mgr_flush(cstate->mgr);
					sha512_ctx_mgr_flush(cstate);
		kernel_fpu_end();
		kernel_fpu_end();
		if (!sha_ctx) {
		if (!sha_ctx) {
			pr_err("sha512_mb error: nothing got flushed for"
			pr_err("sha512_mb error: nothing got flushed for"