Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0d0faeae authored by qctecmdr Service's avatar qctecmdr Service Committed by Gerrit - the friendly Code Review server
Browse files

Merge "mmc: cmdq_hci: Do not handle other requests in case of error."

parents 198635a3 94adca08
Loading
Loading
Loading
Loading
+63 −16
Original line number Diff line number Diff line
@@ -39,6 +39,7 @@
#include <linux/ioprio.h>
#include <linux/idr.h>
#include <linux/debugfs.h>
#include <linux/sched/debug.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
@@ -2665,7 +2666,12 @@ static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
	u8 active_small_sector_read = 0;
	int ret = 0;

	mmc_cmdq_up_rwsem(host);
	mmc_deferred_scaling(host);
	ret = mmc_cmdq_down_rwsem(host, req);
	if (ret)
		return ret;

	mmc_cmdq_clk_scaling_start_busy(host, true);

	BUG_ON((req->tag < 0) || (req->tag > card->ext_csd.cmdq_depth));
@@ -2700,6 +2706,7 @@ static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)

	if (!ret && (host->clk_scaling.state == MMC_LOAD_LOW)) {

		mmc_cmdq_up_rwsem(host);
		ret = wait_event_interruptible_timeout(ctx->queue_empty_wq,
			(!ctx->active_reqs &&
			!test_bit(CMDQ_STATE_ERR, &ctx->curr_state)),
@@ -2708,6 +2715,8 @@ static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
			pr_err("%s: queue_empty_wq timeout case? ret = (%d)\n",
				__func__, ret);
		ret = 0;
		mmc_cmdq_down_rwsem(host, NULL);

	}

	if (ret) {
@@ -2997,6 +3006,9 @@ static void mmc_blk_cmdq_err(struct mmc_queue *mq)
	if (WARN_ON(!mrq))
		return;

	down_write(&ctx_info->err_rwsem);
	pr_err("%s: %s Starting cmdq Error handler\n",
		mmc_hostname(host), __func__);
	q = mrq->req->q;
	err = mmc_cmdq_halt(host, true);
	if (err) {
@@ -3049,7 +3061,27 @@ static void mmc_blk_cmdq_err(struct mmc_queue *mq)
	host->err_mrq = NULL;
	clear_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state);
	WARN_ON(!test_and_clear_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));

#ifdef CONFIG_MMC_CLKGATE
	pr_err("%s: %s Exiting CMDQ Error handler clk-rqs(%d), claim-cnt(%d), claimed(%d), claimer(%s)\n",
		mmc_hostname(host), __func__, host->clk_requests,
		host->claim_cnt, host->claimed, host->claimer->comm);
#else
	pr_err("%s: %s Exiting CMDQ Error handler, claim-cnt(%d), claimed(%d), claimer(%s)\n",
		mmc_hostname(host), __func__, host->claim_cnt, host->claimed,
		host->claimer->comm);
#endif
	sched_show_task(mq->thread);
	if (host->claimed && host->claimer)
		sched_show_task(host->claimer);
#ifdef CONFIG_MMC_CLKGATE
	WARN_ON(host->clk_requests < 0);
#endif
	WARN_ON(host->claim_cnt < 0);

	up_write(&ctx_info->err_rwsem);
	wake_up(&ctx_info->wait);
	wake_up_interruptible(&host->cmdq_ctx.queue_empty_wq);
}

/* invoked by block layer in softirq context */
@@ -3062,19 +3094,30 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
	struct mmc_cmdq_req *cmdq_req = &mq_rq->cmdq_req;
	struct mmc_queue *mq = (struct mmc_queue *)rq->q->queuedata;
	int err = 0;
	int err_resp = 0;
	bool is_dcmd = false;
	bool err_rwsem = false;

	if (down_read_trylock(&ctx_info->err_rwsem)) {
		err_rwsem = true;
	} else {
		pr_err("%s: err_rwsem lock failed to acquire => err handler active\n",
			__func__);
		WARN_ON_ONCE(!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
		goto out;
	}

	if (mrq->cmd && mrq->cmd->error)
		err = mrq->cmd->error;
	else if (mrq->data && mrq->data->error)
		err = mrq->data->error;
	if (cmdq_req->resp_err)
		err = cmdq_req->resp_err;
		err_resp = cmdq_req->resp_err;

	if ((err || cmdq_req->resp_err) && !cmdq_req->skip_err_handling) {
	if ((err || err_resp) && !cmdq_req->skip_err_handling) {
		pr_err("%s: %s: txfr error(%d)/resp_err(%d)\n",
				mmc_hostname(mrq->host), __func__, err,
				cmdq_req->resp_err);
				err_resp);
		if (test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) {
			pr_err("%s: CQ in error state, ending current req: %d\n",
				__func__, err);
@@ -3086,12 +3129,6 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
		}
		goto out;
	}
	/*
	 * In case of error CMDQ is expected to be either in halted
	 * or disable state so cannot receive any completion of
	 * other requests.
	 */
	WARN_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));

	/* clear pending request */
	BUG_ON(!test_and_clear_bit(cmdq_req->tag,
@@ -3125,7 +3162,7 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
out:

	mmc_cmdq_clk_scaling_stop_busy(host, true, is_dcmd);
	if (!err) {
	if (err_rwsem && !(err || err_resp)) {
		mmc_host_clk_release(host);
		wake_up(&ctx_info->wait);
		mmc_put_card(host->card);
@@ -3137,6 +3174,8 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
	if (blk_queue_stopped(mq->queue) && !ctx_info->active_reqs)
		complete(&mq->cmdq_shutdown_complete);

	if (err_rwsem)
		up_read(&ctx_info->err_rwsem);
	return;
}

@@ -3440,14 +3479,16 @@ static inline int mmc_blk_cmdq_part_switch(struct mmc_card *card,
	return ret;
}

static void  mmc_cmdq_wait_for_small_sector_read(struct mmc_card *card)
static int  mmc_cmdq_wait_for_small_sector_read(struct mmc_card *card,
						struct request *req)
{
	struct mmc_host *host = card->host;
	struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
	int ret;
	int ret = 0;

	if ((card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) &&
		ctx->active_small_sector_read_reqs) {
		mmc_cmdq_up_rwsem(host);
		ret = wait_event_interruptible(ctx->queue_empty_wq,
					      !ctx->active_reqs);
		if (ret) {
@@ -3457,6 +3498,9 @@ static void mmc_cmdq_wait_for_small_sector_read(struct mmc_card *card)
		}
		/* clear the counter now */
		ctx->active_small_sector_read_reqs = 0;
		ret = mmc_cmdq_down_rwsem(host, req);
		if (ret)
			return ret;
		/*
		 * If there were small sector (less than 8 sectors) read
		 * operations in progress then we have to wait for the
@@ -3466,6 +3510,7 @@ static void mmc_cmdq_wait_for_small_sector_read(struct mmc_card *card)
		 */
		udelay(MMC_QUIRK_CMDQ_DELAY_BEFORE_DCMD);
	}
	return ret;
}

static int mmc_blk_cmdq_issue_drv_op(struct mmc_card *card, struct request *req)
@@ -3570,7 +3615,8 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
	if (req) {
		switch (req_op(req)) {
		case REQ_OP_DISCARD:
			mmc_cmdq_wait_for_small_sector_read(card);
			ret = mmc_cmdq_wait_for_small_sector_read(card, req);
			if (!ret)
				ret = mmc_blk_cmdq_issue_discard_rq(mq, req);
			break;
		case REQ_OP_SECURE_ERASE:
@@ -3580,7 +3626,8 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
				ret = mmc_blk_cmdq_issue_discard_rq(mq, req);
			break;
		case REQ_OP_FLUSH:
			mmc_cmdq_wait_for_small_sector_read(card);
			ret = mmc_cmdq_wait_for_small_sector_read(card, req);
			if (!ret)
				ret = mmc_blk_cmdq_issue_flush_rq(mq, req);
			break;
		case REQ_OP_DRV_IN:
+23 −1
Original line number Diff line number Diff line
@@ -133,6 +133,26 @@ static bool mmc_is_data_request(struct mmc_request *mmc_request)
	}
}

/*
 * mmc_cmdq_up_rwsem() - release the shared (reader) hold on the CMDQ
 * error-handling rwsem acquired by mmc_cmdq_down_rwsem().
 *
 * @host: MMC host whose cmdq context owns the semaphore.
 *
 * The CMDQ error handler takes this semaphore for writing, so dropping
 * the read side here allows a pending error handler to make progress.
 */
void mmc_cmdq_up_rwsem(struct mmc_host *host)
{
	up_read(&host->cmdq_ctx.err_rwsem);
}
EXPORT_SYMBOL(mmc_cmdq_up_rwsem);

/*
 * mmc_cmdq_down_rwsem() - take a shared (reader) hold on the CMDQ
 * error-handling rwsem before working on a request.
 *
 * @host: MMC host whose cmdq context owns the semaphore.
 * @rq:   request about to be issued, or NULL if there is none to check.
 *
 * Return: 0 on success, -EINVAL if @rq is no longer marked as queued
 * (RQF_QUEUED cleared), which can happen while this task slept waiting
 * for the error handler (the write-side holder) to finish.
 *
 * NOTE(review): the read lock is held on return in BOTH cases — callers
 * appear responsible for releasing it via mmc_cmdq_up_rwsem() even when
 * -EINVAL is returned (see the cmdq thread's error path); confirm all
 * callers honor this.
 */
int mmc_cmdq_down_rwsem(struct mmc_host *host, struct request *rq)
{
	struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;

	down_read(&ctx->err_rwsem);

	/* The request may have been dequeued while we waited for the lock. */
	if (rq && !(rq->rq_flags & RQF_QUEUED))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(mmc_cmdq_down_rwsem);

static void mmc_clk_scaling_start_busy(struct mmc_host *host, bool lock_needed)
{
	struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
@@ -1840,8 +1860,10 @@ int mmc_cmdq_wait_for_dcmd(struct mmc_host *host,
	if (err)
		return err;

	mmc_cmdq_up_rwsem(host);
	wait_for_completion_io(&mrq->completion);
	if (cmd->error) {
	err = mmc_cmdq_down_rwsem(host, mrq->req);
	if (err || cmd->error) {
		pr_err("%s: DCMD %d failed with err %d\n",
				mmc_hostname(host), cmd->opcode,
				cmd->error);
+8 −0
Original line number Diff line number Diff line
@@ -146,7 +146,14 @@ static int mmc_cmdq_thread(void *d)
		if (kthread_should_stop())
			break;

		ret = mmc_cmdq_down_rwsem(host, mq->cmdq_req_peeked);
		if (ret) {
			mmc_cmdq_up_rwsem(host);
			continue;
		}
		ret = mq->cmdq_issue_fn(mq, mq->cmdq_req_peeked);
		mmc_cmdq_up_rwsem(host);

		/*
		 * Don't requeue if issue_fn fails.
		 * Recovery will be come by completion softirq
@@ -236,6 +243,7 @@ int mmc_cmdq_init(struct mmc_queue *mq, struct mmc_card *card)

	init_waitqueue_head(&card->host->cmdq_ctx.queue_empty_wq);
	init_waitqueue_head(&card->host->cmdq_ctx.wait);
	init_rwsem(&card->host->cmdq_ctx.err_rwsem);

	ret = blk_queue_init_tags(mq->queue, q_depth, NULL, BLK_TAG_ALLOC_FIFO);
	if (ret) {
+1 −0
Original line number Diff line number Diff line
@@ -1100,6 +1100,7 @@ irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
			}
		}
		cmdq_finish_data(mmc, tag);
		goto out;
	} else {
		cmdq_writel(cq_host, status, CQIS);
	}
+2 −0
Original line number Diff line number Diff line
@@ -213,6 +213,8 @@ extern void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host,
	bool lock_needed);
extern void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
	bool lock_needed, bool is_cmdq_dcmd);
extern void mmc_cmdq_up_rwsem(struct mmc_host *host);
extern int mmc_cmdq_down_rwsem(struct mmc_host *host, struct request *rq);
extern int __mmc_switch_cmdq_mode(struct mmc_command *cmd, u8 set, u8 index,
				  u8 value, unsigned int timeout_ms,
				  bool use_busy_signal, bool ignore_timeout);
Loading