
Commit 4f5511f8 authored by Ritesh Harjani, committed by Gerrit - the friendly Code Review server

mmc: CMDQ error handling context synchronization



This patch adds err_rwsem in mmc_cmdq_context_info,
which is used to synchronize between:
1. Error handler work <--> blk softirq completion.
2. Error handler work <--> mmc_cmdq_thread issue ctx.

The error handler takes the write lock, and the remaining
contexts which can run in parallel need to take the read
semaphore.
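
As a rough orientation only (not part of the commit itself), the locking
protocol introduced here can be sketched as follows; all names are taken
from the diff below:

	/* Error handler work: exclusive (writer) side. */
	down_write(&ctx_info->err_rwsem);
	/* ... halt controller, requeue reqs, clear CMDQ_STATE_ERR ... */
	up_write(&ctx_info->err_rwsem);

	/* Issue context (mmc_cmdq_thread): shared (reader) side. */
	ret = mmc_cmdq_down_rwsem(host, req);	/* down_read() + REQ_QUEUED re-check */
	if (!ret)
		ret = mq->cmdq_issue_fn(mq, req);
	mmc_cmdq_up_rwsem(host);		/* up_read() */

	/*
	 * Completion softirq: cannot sleep, so it only trylocks the reader
	 * side; a failed trylock means the error handler is active and will
	 * requeue the request itself.
	 */
	if (down_read_trylock(&ctx_info->err_rwsem)) {
		/* normal completion path */
		up_read(&ctx_info->err_rwsem);
	}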

Make sure that we release the read rwsem before
sleeping in case we expect the error handler to run, and
check the state of the req again after waking up. If the
req is no longer valid we return to the caller; otherwise
we proceed to issue this req to the controller.

Also note that the error handler currently does mmc_put_card
for every pulled request for which mmc_get_card was done.
So if the request was requeued by the cmdq error handler,
the corresponding mmc_put_card has already been done as well.
Hence, after we wake up and acquire the semaphore, we should
simply return to the caller.
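
For illustration only, this drop-before-sleep pattern looks like the
following (condensed from the mmc_blk_cmdq_issue_rq() change in the diff
below):

	mmc_cmdq_up_rwsem(host);	/* release reader side before sleeping */
	ret = wait_event_interruptible(ctx->queue_empty_wq,
				       !ctx->active_reqs);
	/*
	 * Re-acquire and re-validate: if REQ_QUEUED was cleared, the error
	 * handler requeued this req (and already did mmc_put_card), so
	 * simply return to the caller.
	 */
	ret = mmc_cmdq_down_rwsem(host, req);
	if (ret)
		return ret;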

Change-Id: Ie29a02cee4e42a0df1bd0fff6c746f112527b433
Signed-off-by: Ritesh Harjani <riteshh@codeaurora.org>
Signed-off-by: Sahitya Tummala <stummala@codeaurora.org>
parent 98117c1e
+54 −10
@@ -3237,7 +3237,12 @@ static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 	u8 active_small_sector_read = 0;
 	int ret = 0;
 
+	mmc_cmdq_up_rwsem(host);
 	mmc_deferred_scaling(host);
+	ret = mmc_cmdq_down_rwsem(host, req);
+	if (ret)
+		return ret;
+
 	mmc_cmdq_clk_scaling_start_busy(host, true);
 
 	BUG_ON((req->tag < 0) || (req->tag > card->ext_csd.cmdq_depth));
@@ -3270,9 +3275,18 @@ static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 	 * empty faster and we will be able to scale up to Nominal frequency
 	 * when needed.
 	 */
-	if (!ret && (host->clk_scaling.state == MMC_LOAD_LOW))
-		wait_event_interruptible(ctx->queue_empty_wq,
-					(!ctx->active_reqs));
+	if (!ret && (host->clk_scaling.state == MMC_LOAD_LOW)) {
+
+		ret = wait_event_interruptible_timeout(ctx->queue_empty_wq,
+				(!ctx->active_reqs &&
+				 !test_bit(CMDQ_STATE_ERR, &ctx->curr_state)),
+				msecs_to_jiffies(5000));
+		if (!ret)
+			pr_err("%s: queue_empty_wq timeout case? ret = (%d)\n",
+				__func__, ret);
+		ret = 0;
+	}
 
 	if (ret) {
 		/* clear pending request */
@@ -3573,6 +3587,7 @@ static void mmc_blk_cmdq_err(struct mmc_queue *mq)
 	if (WARN_ON(!mrq))
 		return;
 
+	down_write(&ctx_info->err_rwsem);
 	q = mrq->req->q;
 	err = mmc_cmdq_halt(host, true);
 	if (err) {
@@ -3625,6 +3640,24 @@ static void mmc_blk_cmdq_err(struct mmc_queue *mq)
 	host->err_mrq = NULL;
 	clear_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state);
 	WARN_ON(!test_and_clear_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+
+#ifdef CONFIG_MMC_CLKGATE
+	pr_err("%s: clk-rqs(%d), claim-cnt(%d), claimed(%d), claimer(%s)\n",
+		__func__, host->clk_requests, host->claim_cnt, host->claimed,
+		host->claimer->comm);
+#else
+	pr_err("%s: claim-cnt(%d), claimed(%d), claimer(%s)\n", __func__,
+			host->claim_cnt, host->claimed, host->claimer->comm);
+#endif
+	sched_show_task(mq->thread);
+	if (host->claimed && host->claimer)
+		sched_show_task(host->claimer);
+#ifdef CONFIG_MMC_CLKGATE
+	WARN_ON(host->clk_requests < 0);
+#endif
+	WARN_ON(host->claim_cnt < 0);
+
+	up_write(&ctx_info->err_rwsem);
 	wake_up(&ctx_info->wait);
 }

@@ -3639,6 +3672,16 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
 	struct mmc_queue *mq = (struct mmc_queue *)rq->q->queuedata;
 	int err = 0;
 	bool is_dcmd = false;
+	bool err_rwsem = false;
+
+	if (down_read_trylock(&ctx_info->err_rwsem)) {
+		err_rwsem = true;
+	} else {
+		pr_err("%s: err_rwsem lock failed to acquire => err handler active\n",
+		    __func__);
+		WARN_ON_ONCE(!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+		goto out;
+	}
 
 	if (mrq->cmd && mrq->cmd->error)
 		err = mrq->cmd->error;
@@ -3660,12 +3703,6 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
 		}
 		goto out;
 	}
-	/*
-	 * In case of error CMDQ is expected to be either in halted
-	 * or disable state so cannot receive any completion of
-	 * other requests.
-	 */
-	WARN_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
 
 	/* clear pending request */
 	BUG_ON(!test_and_clear_bit(cmdq_req->tag,
@@ -3699,7 +3736,7 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
 out:
 
 	mmc_cmdq_clk_scaling_stop_busy(host, true, is_dcmd);
-	if (!(err || cmdq_req->resp_err)) {
+	if (err_rwsem && !(err || cmdq_req->resp_err)) {
 		mmc_host_clk_release(host);
 		wake_up(&ctx_info->wait);
 		mmc_put_card(host->card);
@@ -3711,6 +3748,8 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
 	if (blk_queue_stopped(mq->queue) && !ctx_info->active_reqs)
 		complete(&mq->cmdq_shutdown_complete);
 
+	if (err_rwsem)
+		up_read(&ctx_info->err_rwsem);
 	return;
 }

@@ -4091,6 +4130,7 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
 		if (mmc_req_is_special(req) &&
 		    (card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) &&
 		    ctx->active_small_sector_read_reqs) {
+			mmc_cmdq_up_rwsem(host);
 			ret = wait_event_interruptible(ctx->queue_empty_wq,
 						      !ctx->active_reqs);
 			if (ret) {
@@ -4099,6 +4139,10 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
 					__func__, ret);
 				BUG_ON(1);
 			}
+			ret = mmc_cmdq_down_rwsem(host, req);
+			if (ret)
+				return ret;
+
 			/* clear the counter now */
 			ctx->active_small_sector_read_reqs = 0;
 			/*
+8 −0
@@ -133,7 +133,14 @@ static int mmc_cmdq_thread(void *d)
 		if (kthread_should_stop())
 			break;
 
+		ret = mmc_cmdq_down_rwsem(host, mq->cmdq_req_peeked);
+		if (ret) {
+			mmc_cmdq_up_rwsem(host);
+			continue;
+		}
 		ret = mq->cmdq_issue_fn(mq, mq->cmdq_req_peeked);
+		mmc_cmdq_up_rwsem(host);
+
 		/*
 		 * Don't requeue if issue_fn fails.
 		 * Recovery will be come by completion softirq
@@ -645,6 +652,7 @@ int mmc_cmdq_init(struct mmc_queue *mq, struct mmc_card *card)

 	init_waitqueue_head(&card->host->cmdq_ctx.queue_empty_wq);
 	init_waitqueue_head(&card->host->cmdq_ctx.wait);
+	init_rwsem(&card->host->cmdq_ctx.err_rwsem);
 
 	mq->mqrq_cmdq = kzalloc(
 			sizeof(struct mmc_queue_req) * q_depth, GFP_KERNEL);
+31 −1
@@ -137,6 +137,34 @@ static bool mmc_is_data_request(struct mmc_request *mmc_request)
 	}
 }
 
+void mmc_cmdq_up_rwsem(struct mmc_host *host)
+{
+	struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+
+	up_read(&ctx->err_rwsem);
+}
+EXPORT_SYMBOL(mmc_cmdq_up_rwsem);
+
+int mmc_cmdq_down_rwsem(struct mmc_host *host, struct request *rq)
+{
+	struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+
+	down_read(&ctx->err_rwsem);
+	/*
+	 * This is to prevent a case where issue context has already
+	 * called blk_queue_start_tag(), immediately after which error
+	 * handler work has run and called blk_queue_invalidate_tags().
+	 * In this case, the issue context should check for REQ_QUEUED
+	 * before proceeding with that request. It should ideally call
+	 * blk_queue_start_tag() again on the requeued request.
+	 */
+	if (!(rq->cmd_flags & REQ_QUEUED))
+		return -EINVAL;
+	else
+		return 0;
+}
+EXPORT_SYMBOL(mmc_cmdq_down_rwsem);
+
 static void mmc_clk_scaling_start_busy(struct mmc_host *host, bool lock_needed)
 {
 	struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
@@ -1849,8 +1877,10 @@ int mmc_cmdq_wait_for_dcmd(struct mmc_host *host,
 	if (err)
 		return err;
 
+	mmc_cmdq_up_rwsem(host);
 	wait_for_completion_io(&mrq->completion);
-	if (cmd->error) {
+	err = mmc_cmdq_down_rwsem(host, mrq->req);
+	if (err || cmd->error) {
 		pr_err("%s: DCMD %d failed with err %d\n",
 				mmc_hostname(host), cmd->opcode,
 				cmd->error);
+2 −0
@@ -237,6 +237,8 @@ extern void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host,
 extern void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
 	bool lock_needed, bool is_cmdq_dcmd);
 extern int mmc_recovery_fallback_lower_speed(struct mmc_host *host);
+extern void mmc_cmdq_up_rwsem(struct mmc_host *host);
+extern int mmc_cmdq_down_rwsem(struct mmc_host *host, struct request *rq);
 
 /**
  *	mmc_claim_host - exclusively claim a host
+2 −0
@@ -286,6 +286,7 @@ struct mmc_slot {
  * @wait		waiting for all conditions described in
  *			mmc_cmdq_ready_wait to be satisified before
  *			issuing the new request to LLD.
+ * @err_rwsem		synchronizes issue/completion/error-handler ctx
  */
 struct mmc_cmdq_context_info {
 	unsigned long	active_reqs; /* in-flight requests */
@@ -299,6 +300,7 @@ struct mmc_cmdq_context_info {
 	wait_queue_head_t	queue_empty_wq;
 	wait_queue_head_t	wait;
 	int active_small_sector_read_reqs;
+	struct rw_semaphore err_rwsem;
 };
 
 /**