Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6af05365 authored by Ritesh Harjani's avatar Ritesh Harjani
Browse files

mmc: CMDQ Error handling context synchronization



This patch adds err_rwsem in mmc_cmdq_context_info,
which is used to synchronize between:
1. Error handler work <--> blk softirqs completion.
2. Error handler work <--> mmc_cmdq_thread issue ctx.

The error handler takes the writer lock, and the remaining
contexts which can run in parallel need to take the read
semaphore.

We make sure to release the read rwsem before sleeping in
case we expect the error handler to run, and on waking we
re-check the state of the req. If the req is no longer
valid we return to the caller; otherwise we proceed to
issue this req to the controller.

Also note that error handler as of now is doing mmc_put_card
for all the requests pulled for which mmc_get_card is done.
So in case if the request was re-queued by cmdq-err handler,
then corresponding mmc_put_card is also already done.
So after we wake up and acquire the semaphore, we should
simply return to the caller.

TODO: Other IOCTL context checking

Change-Id: I63d27faa14c33af2f71a3416cd840f13764e47ae
Signed-off-by: default avatarRitesh Harjani <riteshh@codeaurora.org>
Signed-off-by: default avatarAsutosh Das <asutoshd@codeaurora.org>
Signed-off-by: default avatarVeerabhadrarao Badiganti <vbadigan@codeaurora.org>
parent 9d3506ec
Loading
Loading
Loading
Loading
+52 −10
Original line number Diff line number Diff line
@@ -39,6 +39,7 @@
#include <linux/ioprio.h>
#include <linux/idr.h>
#include <linux/debugfs.h>
#include <linux/sched/debug.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
@@ -2665,7 +2666,12 @@ static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
	u8 active_small_sector_read = 0;
	int ret = 0;

	mmc_cmdq_up_rwsem(host);
	mmc_deferred_scaling(host);
	ret = mmc_cmdq_down_rwsem(host, req);
	if (ret)
		return ret;

	mmc_cmdq_clk_scaling_start_busy(host, true);

	BUG_ON((req->tag < 0) || (req->tag > card->ext_csd.cmdq_depth));
@@ -2700,6 +2706,7 @@ static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)

	if (!ret && (host->clk_scaling.state == MMC_LOAD_LOW)) {

		mmc_cmdq_up_rwsem(host);
		ret = wait_event_interruptible_timeout(ctx->queue_empty_wq,
			(!ctx->active_reqs &&
			!test_bit(CMDQ_STATE_ERR, &ctx->curr_state)),
@@ -2708,6 +2715,8 @@ static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
			pr_err("%s: queue_empty_wq timeout case? ret = (%d)\n",
				__func__, ret);
		ret = 0;
		mmc_cmdq_down_rwsem(host, NULL);

	}

	if (ret) {
@@ -2997,6 +3006,9 @@ static void mmc_blk_cmdq_err(struct mmc_queue *mq)
	if (WARN_ON(!mrq))
		return;

	down_write(&ctx_info->err_rwsem);
	pr_err("%s: %s Starting cmdq Error handler\n",
		mmc_hostname(host), __func__);
	q = mrq->req->q;
	err = mmc_cmdq_halt(host, true);
	if (err) {
@@ -3049,6 +3061,25 @@ static void mmc_blk_cmdq_err(struct mmc_queue *mq)
	host->err_mrq = NULL;
	clear_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state);
	WARN_ON(!test_and_clear_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));

#ifdef CONFIG_MMC_CLKGATE
	pr_err("%s: %s Exiting CMDQ Error handler clk-rqs(%d), claim-cnt(%d), claimed(%d), claimer(%s)\n",
		mmc_hostname(host), __func__, host->clk_requests,
		host->claim_cnt, host->claimed, host->claimer->comm);
#else
	pr_err("%s: %s Exiting CMDQ Error handler, claim-cnt(%d), claimed(%d), claimer(%s)\n",
		mmc_hostname(host), __func__, host->claim_cnt, host->claimed,
		host->claimer->comm);
#endif
	sched_show_task(mq->thread);
	if (host->claimed && host->claimer)
		sched_show_task(host->claimer);
#ifdef CONFIG_MMC_CLKGATE
	WARN_ON(host->clk_requests < 0);
#endif
	WARN_ON(host->claim_cnt < 0);

	up_write(&ctx_info->err_rwsem);
	wake_up(&ctx_info->wait);
}

@@ -3062,19 +3093,30 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
	struct mmc_cmdq_req *cmdq_req = &mq_rq->cmdq_req;
	struct mmc_queue *mq = (struct mmc_queue *)rq->q->queuedata;
	int err = 0;
	int err_resp = 0;
	bool is_dcmd = false;
	bool err_rwsem = false;

	if (down_read_trylock(&ctx_info->err_rwsem)) {
		err_rwsem = true;
	} else {
		pr_err("%s: err_rwsem lock failed to acquire => err handler active\n",
			__func__);
		WARN_ON_ONCE(!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
		goto out;
	}

	if (mrq->cmd && mrq->cmd->error)
		err = mrq->cmd->error;
	else if (mrq->data && mrq->data->error)
		err = mrq->data->error;
	if (cmdq_req->resp_err)
		err = cmdq_req->resp_err;
		err_resp = cmdq_req->resp_err;

	if ((err || cmdq_req->resp_err) && !cmdq_req->skip_err_handling) {
	if ((err || err_resp) && !cmdq_req->skip_err_handling) {
		pr_err("%s: %s: txfr error(%d)/resp_err(%d)\n",
				mmc_hostname(mrq->host), __func__, err,
				cmdq_req->resp_err);
				err_resp);
		if (test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) {
			pr_err("%s: CQ in error state, ending current req: %d\n",
				__func__, err);
@@ -3086,12 +3128,6 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
		}
		goto out;
	}
	/*
	 * In case of error CMDQ is expected to be either in halted
	 * or disable state so cannot receive any completion of
	 * other requests.
	 */
	WARN_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));

	/* clear pending request */
	BUG_ON(!test_and_clear_bit(cmdq_req->tag,
@@ -3125,7 +3161,7 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
out:

	mmc_cmdq_clk_scaling_stop_busy(host, true, is_dcmd);
	if (!err) {
	if (err_rwsem && !(err || err_resp)) {
		mmc_host_clk_release(host);
		wake_up(&ctx_info->wait);
		mmc_put_card(host->card);
@@ -3137,6 +3173,8 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
	if (blk_queue_stopped(mq->queue) && !ctx_info->active_reqs)
		complete(&mq->cmdq_shutdown_complete);

	if (err_rwsem)
		up_read(&ctx_info->err_rwsem);
	return;
}

@@ -3449,6 +3487,7 @@ static int mmc_cmdq_wait_for_small_sector_read(struct mmc_card *card,

	if ((card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) &&
		ctx->active_small_sector_read_reqs) {
		mmc_cmdq_up_rwsem(host);
		ret = wait_event_interruptible(ctx->queue_empty_wq,
					      !ctx->active_reqs);
		if (ret) {
@@ -3458,6 +3497,9 @@ static int mmc_cmdq_wait_for_small_sector_read(struct mmc_card *card,
		}
		/* clear the counter now */
		ctx->active_small_sector_read_reqs = 0;
		ret = mmc_cmdq_down_rwsem(host, req);
		if (ret)
			return ret;
		/*
		 * If there were small sector (less than 8 sectors) read
		 * operations in progress then we have to wait for the
+23 −1
Original line number Diff line number Diff line
@@ -133,6 +133,26 @@ static bool mmc_is_data_request(struct mmc_request *mmc_request)
	}
}

/*
 * mmc_cmdq_up_rwsem - drop the CMDQ error-handler read semaphore
 * @host: MMC host whose cmdq context owns the semaphore
 *
 * Releases the read side of err_rwsem previously acquired via
 * mmc_cmdq_down_rwsem().  Issue/completion paths call this before
 * sleeping so the error handler (the write-side holder) can make
 * progress.
 */
void mmc_cmdq_up_rwsem(struct mmc_host *host)
{
	up_read(&host->cmdq_ctx.err_rwsem);
}
EXPORT_SYMBOL(mmc_cmdq_up_rwsem);

/*
 * mmc_cmdq_down_rwsem - take the CMDQ error-handler read semaphore
 * @host: MMC host whose cmdq context owns the semaphore
 * @rq:   request about to be issued, or NULL if none to validate
 *
 * Acquires the read side of err_rwsem, excluding the error handler
 * (which takes the write side).  Since the error handler may have
 * re-queued @rq while the lock was dropped, the request is re-validated
 * after acquisition: a request without RQF_QUEUED is no longer valid to
 * issue and -EINVAL is returned.
 *
 * Returns 0 on success, -EINVAL if @rq is no longer queued.  Note that
 * the read lock is held on return in BOTH cases; the caller is
 * responsible for releasing it (see the mmc_cmdq_up_rwsem() call on the
 * error path in mmc_cmdq_thread()).
 */
int mmc_cmdq_down_rwsem(struct mmc_host *host, struct request *rq)
{
	struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;

	down_read(&ctx_info->err_rwsem);

	/* Request was unqueued (e.g. by the error handler) while unlocked. */
	if (rq && !(rq->rq_flags & RQF_QUEUED))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(mmc_cmdq_down_rwsem);

static void mmc_clk_scaling_start_busy(struct mmc_host *host, bool lock_needed)
{
	struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
@@ -1840,8 +1860,10 @@ int mmc_cmdq_wait_for_dcmd(struct mmc_host *host,
	if (err)
		return err;

	mmc_cmdq_up_rwsem(host);
	wait_for_completion_io(&mrq->completion);
	if (cmd->error) {
	err = mmc_cmdq_down_rwsem(host, mrq->req);
	if (err || cmd->error) {
		pr_err("%s: DCMD %d failed with err %d\n",
				mmc_hostname(host), cmd->opcode,
				cmd->error);
+8 −0
Original line number Diff line number Diff line
@@ -146,7 +146,14 @@ static int mmc_cmdq_thread(void *d)
		if (kthread_should_stop())
			break;

		ret = mmc_cmdq_down_rwsem(host, mq->cmdq_req_peeked);
		if (ret) {
			mmc_cmdq_up_rwsem(host);
			continue;
		}
		ret = mq->cmdq_issue_fn(mq, mq->cmdq_req_peeked);
		mmc_cmdq_up_rwsem(host);

		/*
		 * Don't requeue if issue_fn fails.
		 * Recovery will be come by completion softirq
@@ -236,6 +243,7 @@ int mmc_cmdq_init(struct mmc_queue *mq, struct mmc_card *card)

	init_waitqueue_head(&card->host->cmdq_ctx.queue_empty_wq);
	init_waitqueue_head(&card->host->cmdq_ctx.wait);
	init_rwsem(&card->host->cmdq_ctx.err_rwsem);

	ret = blk_queue_init_tags(mq->queue, q_depth, NULL, BLK_TAG_ALLOC_FIFO);
	if (ret) {
+2 −0
Original line number Diff line number Diff line
@@ -213,6 +213,8 @@ extern void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host,
	bool lock_needed);
extern void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
	bool lock_needed, bool is_cmdq_dcmd);
extern void mmc_cmdq_up_rwsem(struct mmc_host *host);
extern int mmc_cmdq_down_rwsem(struct mmc_host *host, struct request *rq);
extern int __mmc_switch_cmdq_mode(struct mmc_command *cmd, u8 set, u8 index,
				  u8 value, unsigned int timeout_ms,
				  bool use_busy_signal, bool ignore_timeout);
+2 −0
Original line number Diff line number Diff line
@@ -327,6 +327,7 @@ struct mmc_slot {
 * @wait		waiting for all conditions described in
 *			mmc_cmdq_ready_wait to be satisified before
 *			issuing the new request to LLD.
 * @err_rwsem		synchronizes issue/completion/error-handler ctx
 */
struct mmc_cmdq_context_info {
	unsigned long	active_reqs; /* in-flight requests */
@@ -340,6 +341,7 @@ struct mmc_cmdq_context_info {
	wait_queue_head_t	queue_empty_wq;
	wait_queue_head_t	wait;
	int active_small_sector_read_reqs;
	struct rw_semaphore err_rwsem;
};

/**