Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b23f0d53 authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "mmc: host: cmdq: Check if tag info extracted from CQTERRI is valid"

parents 86521a0c 509e2412
Loading
Loading
Loading
Loading
+113 −34
Original line number Diff line number Diff line
@@ -1867,11 +1867,6 @@ static struct mmc_cmdq_req *mmc_blk_cmdq_prep_discard_req(struct mmc_queue *mq,
	struct mmc_cmdq_req *cmdq_req;
	struct mmc_queue_req *active_mqrq;

	BUG_ON(req->tag > card->ext_csd.cmdq_depth);
	BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));

	set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);

	active_mqrq = &mq->mqrq_cmdq[req->tag];
	active_mqrq->req = req;

@@ -1879,6 +1874,17 @@ static struct mmc_cmdq_req *mmc_blk_cmdq_prep_discard_req(struct mmc_queue *mq,
	cmdq_req->cmdq_req_flags |= QBR;
	cmdq_req->mrq.cmd = &cmdq_req->cmd;
	cmdq_req->tag = req->tag;

	/*
	 * To avoid potential race condition with the error handler work,
	 * do the following:
	 * 1. set init_completion() only once
	 * 2. set the CMDQ_STATE_DCMD_ACTIVE only after it's tag is set
	 */
	init_completion(&cmdq_req->mrq.completion);
	WARN_ON(req->tag > card->ext_csd.cmdq_depth);
	WARN_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
	set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
	return cmdq_req;
}

@@ -1922,8 +1928,21 @@ static int mmc_blk_cmdq_issue_discard_rq(struct mmc_queue *mq,
	}
	err = mmc_cmdq_erase(cmdq_req, card, from, nr, arg);
clear_dcmd:
	/*
	 * If some other request got an error while there is a DCMD request
	 * in the command queue, then err will be updated with -EAGAIN by the
	 * error handler, which indicates that the caller must not call
	 * blk_complete_request() and must let the request be handled by the
	 * error handler. In all other cases, the caller must call
	 * blk_complete_request().
	 */
	if (err != -EAGAIN) {
		mmc_host_clk_hold(card->host);
		blk_complete_request(req);
	} else {
		pr_err("%s: err(%d) handled by cmdq-error handler\n",
			__func__, err);
	}
out:
	return err ? 1 : 0;
}
@@ -2028,8 +2047,13 @@ static int mmc_blk_cmdq_issue_secdiscard_rq(struct mmc_queue *mq,
				MMC_SECURE_TRIM2_ARG);
	}
clear_dcmd:
	if (err != -EAGAIN) {
		mmc_host_clk_hold(card->host);
		blk_complete_request(req);
	} else {
		pr_err("%s: err(%d) handled by cmdq-error handler\n",
			__func__, err);
	}
out:
	return err ? 1 : 0;
}
@@ -3213,7 +3237,12 @@ static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
	u8 active_small_sector_read = 0;
	int ret = 0;

	mmc_cmdq_up_rwsem(host);
	mmc_deferred_scaling(host);
	ret = mmc_cmdq_down_rwsem(host, req);
	if (ret)
		return ret;

	mmc_cmdq_clk_scaling_start_busy(host, true);

	BUG_ON((req->tag < 0) || (req->tag > card->ext_csd.cmdq_depth));
@@ -3246,9 +3275,18 @@ static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
	 * empty faster and we will be able to scale up to Nominal frequency
	 * when needed.
	 */
	if (!ret && (host->clk_scaling.state == MMC_LOAD_LOW))
		wait_event_interruptible(ctx->queue_empty_wq,
					(!ctx->active_reqs));

	if (!ret && (host->clk_scaling.state == MMC_LOAD_LOW)) {

		ret = wait_event_interruptible_timeout(ctx->queue_empty_wq,
				(!ctx->active_reqs &&
				 !test_bit(CMDQ_STATE_ERR, &ctx->curr_state)),
				msecs_to_jiffies(5000));
		if (!ret)
			pr_err("%s: queue_empty_wq timeout case? ret = (%d)\n",
				__func__, ret);
		ret = 0;
	}

	if (ret) {
		/* clear pending request */
@@ -3339,20 +3377,17 @@ static void mmc_blk_cmdq_reset(struct mmc_host *host, bool clear_all)
}

/**
 * get_cmdq_req_by_tag - returns the cmdq_req based on tag.
 * @q:		request_queue pointer.
 * @tag:	tag number of the request to look up.
 *
 * This function looks up the request with tag number "tag" in the
 * queue and returns the mmc_cmdq_req embedded in its queue entry.
 *
 * returns the cmdq_req pointer on success, NULL otherwise.
 */
static bool is_cmdq_dcmd_req(struct request_queue *q, int tag)
static struct mmc_cmdq_req *get_cmdq_req_by_tag(struct request_queue *q,
						int tag)
{
	struct request *req;
	struct mmc_queue_req *mq_rq;
	struct mmc_cmdq_req *cmdq_req;
	struct mmc_cmdq_req *cmdq_req = NULL;

	req = blk_queue_find_tag(q, tag);
	if (WARN_ON(!req))
@@ -3361,9 +3396,8 @@ static bool is_cmdq_dcmd_req(struct request_queue *q, int tag)
	if (WARN_ON(!mq_rq))
		goto out;
	cmdq_req = &(mq_rq->cmdq_req);
	return (cmdq_req->cmdq_req_flags & DCMD);
out:
	return -ENOENT;
	return cmdq_req;
}

/**
@@ -3383,7 +3417,9 @@ static void mmc_blk_cmdq_reset_all(struct mmc_host *host, int err)
	struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
	struct request_queue *q;
	int itag = 0;
	int ret = 0;
	struct mmc_cmdq_req *cmdq_req = NULL;
	struct mmc_request *dcmd_mrq;
	bool is_err_mrq_dcmd = false;

	if (WARN_ON(!mrq))
		return;
@@ -3399,18 +3435,31 @@ static void mmc_blk_cmdq_reset_all(struct mmc_host *host, int err)

	mmc_blk_cmdq_reset(host, false);

	if (mrq->cmdq_req->cmdq_req_flags & DCMD)
		is_err_mrq_dcmd = true;

	for_each_set_bit(itag, &ctx_info->active_reqs,
			host->num_cq_slots) {
		ret = is_cmdq_dcmd_req(q, itag);
		if (WARN_ON(ret == -ENOENT))
		cmdq_req = get_cmdq_req_by_tag(q, itag);
		if (WARN_ON(!cmdq_req))
			continue;
		if (!ret) {
		if (!(cmdq_req->cmdq_req_flags & DCMD)) {
			WARN_ON(!test_and_clear_bit(itag,
				 &ctx_info->data_active_reqs));
			mmc_cmdq_post_req(host, itag, err);
		} else {
			clear_bit(CMDQ_STATE_DCMD_ACTIVE,
					&ctx_info->curr_state);
			dcmd_mrq = &cmdq_req->mrq;
			WARN_ON(!test_and_clear_bit(CMDQ_STATE_DCMD_ACTIVE,
					&ctx_info->curr_state));
			pr_debug("%s: cmd(%u), req_op(%llu)\n", __func__,
				 dcmd_mrq->cmd->opcode, req_op(dcmd_mrq->req));
			if (!is_err_mrq_dcmd && !dcmd_mrq->cmd->error &&
				(req_op(dcmd_mrq->req) == REQ_OP_SECURE_ERASE ||
				 req_op(dcmd_mrq->req) == REQ_OP_DISCARD)) {
				dcmd_mrq->cmd->error = -EAGAIN;
				complete(&dcmd_mrq->completion);
			}

		}
		WARN_ON(!test_and_clear_bit(itag,
					&ctx_info->active_reqs));
@@ -3538,6 +3587,7 @@ static void mmc_blk_cmdq_err(struct mmc_queue *mq)
	if (WARN_ON(!mrq))
		return;

	down_write(&ctx_info->err_rwsem);
	q = mrq->req->q;
	err = mmc_cmdq_halt(host, true);
	if (err) {
@@ -3590,6 +3640,24 @@ static void mmc_blk_cmdq_err(struct mmc_queue *mq)
	host->err_mrq = NULL;
	clear_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state);
	WARN_ON(!test_and_clear_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));

#ifdef CONFIG_MMC_CLKGATE
	pr_err("%s: clk-rqs(%d), claim-cnt(%d), claimed(%d), claimer(%s)\n",
		__func__, host->clk_requests, host->claim_cnt, host->claimed,
		host->claimer->comm);
#else
	pr_err("%s: claim-cnt(%d), claimed(%d), claimer(%s)\n", __func__,
			host->claim_cnt, host->claimed, host->claimer->comm);
#endif
	sched_show_task(mq->thread);
	if (host->claimed && host->claimer)
		sched_show_task(host->claimer);
#ifdef CONFIG_MMC_CLKGATE
	WARN_ON(host->clk_requests < 0);
#endif
	WARN_ON(host->claim_cnt < 0);

	up_write(&ctx_info->err_rwsem);
	wake_up(&ctx_info->wait);
}

@@ -3604,6 +3672,16 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
	struct mmc_queue *mq = (struct mmc_queue *)rq->q->queuedata;
	int err = 0;
	bool is_dcmd = false;
	bool err_rwsem = false;

	if (down_read_trylock(&ctx_info->err_rwsem)) {
		err_rwsem = true;
	} else {
		pr_err("%s: err_rwsem lock failed to acquire => err handler active\n",
		    __func__);
		WARN_ON_ONCE(!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
		goto out;
	}

	if (mrq->cmd && mrq->cmd->error)
		err = mrq->cmd->error;
@@ -3625,12 +3703,6 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
		}
		goto out;
	}
	/*
	 * In case of error CMDQ is expected to be either in halted
	 * or disable state so cannot receive any completion of
	 * other requests.
	 */
	WARN_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));

	/* clear pending request */
	BUG_ON(!test_and_clear_bit(cmdq_req->tag,
@@ -3664,7 +3736,7 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
out:

	mmc_cmdq_clk_scaling_stop_busy(host, true, is_dcmd);
	if (!(err || cmdq_req->resp_err)) {
	if (err_rwsem && !(err || cmdq_req->resp_err)) {
		mmc_host_clk_release(host);
		wake_up(&ctx_info->wait);
		mmc_put_card(host->card);
@@ -3676,6 +3748,8 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
	if (blk_queue_stopped(mq->queue) && !ctx_info->active_reqs)
		complete(&mq->cmdq_shutdown_complete);

	if (err_rwsem)
		up_read(&ctx_info->err_rwsem);
	return;
}

@@ -4056,6 +4130,7 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
		if (mmc_req_is_special(req) &&
		    (card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) &&
		    ctx->active_small_sector_read_reqs) {
			mmc_cmdq_up_rwsem(host);
			ret = wait_event_interruptible(ctx->queue_empty_wq,
						      !ctx->active_reqs);
			if (ret) {
@@ -4064,6 +4139,10 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
					__func__, ret);
				BUG_ON(1);
			}
			ret = mmc_cmdq_down_rwsem(host, req);
			if (ret)
				return ret;

			/* clear the counter now */
			ctx->active_small_sector_read_reqs = 0;
			/*
+8 −0
Original line number Diff line number Diff line
@@ -133,7 +133,14 @@ static int mmc_cmdq_thread(void *d)
		if (kthread_should_stop())
			break;

		ret = mmc_cmdq_down_rwsem(host, mq->cmdq_req_peeked);
		if (ret) {
			mmc_cmdq_up_rwsem(host);
			continue;
		}
		ret = mq->cmdq_issue_fn(mq, mq->cmdq_req_peeked);
		mmc_cmdq_up_rwsem(host);

		/*
		 * Don't requeue if issue_fn fails.
		 * Recovery will be come by completion softirq
@@ -645,6 +652,7 @@ int mmc_cmdq_init(struct mmc_queue *mq, struct mmc_card *card)

	init_waitqueue_head(&card->host->cmdq_ctx.queue_empty_wq);
	init_waitqueue_head(&card->host->cmdq_ctx.wait);
	init_rwsem(&card->host->cmdq_ctx.err_rwsem);

	mq->mqrq_cmdq = kzalloc(
			sizeof(struct mmc_queue_req) * q_depth, GFP_KERNEL);
+32 −3
Original line number Diff line number Diff line
@@ -137,6 +137,34 @@ static bool mmc_is_data_request(struct mmc_request *mmc_request)
	}
}

/*
 * mmc_cmdq_up_rwsem - release the read side of the cmdq error rwsem.
 * @host: MMC host whose cmdq context owns the semaphore.
 *
 * Pairs with mmc_cmdq_down_rwsem(); must be called once the issue
 * context no longer needs to exclude the error handler.
 */
void mmc_cmdq_up_rwsem(struct mmc_host *host)
{
	up_read(&host->cmdq_ctx.err_rwsem);
}
EXPORT_SYMBOL(mmc_cmdq_up_rwsem);

/*
 * mmc_cmdq_down_rwsem - take the read side of the cmdq error rwsem and
 * verify the request is still tagged.
 * @host: MMC host whose cmdq context owns the semaphore.
 * @rq: request about to be issued.
 *
 * Returns 0 when the request may be issued, -EINVAL when it has lost
 * its tag and must be re-tagged first.
 *
 * NOTE: err_rwsem remains held for read even on the -EINVAL path; the
 * caller is responsible for invoking mmc_cmdq_up_rwsem().
 */
int mmc_cmdq_down_rwsem(struct mmc_host *host, struct request *rq)
{
	down_read(&host->cmdq_ctx.err_rwsem);

	/*
	 * Guard against a race where the issue context has already called
	 * blk_queue_start_tag(), immediately after which the error handler
	 * work ran blk_queue_invalidate_tags(). If the request lost its
	 * REQ_QUEUED flag it must not be issued as-is; ideally the caller
	 * calls blk_queue_start_tag() again on the requeued request.
	 */
	if (rq->cmd_flags & REQ_QUEUED)
		return 0;

	return -EINVAL;
}
EXPORT_SYMBOL(mmc_cmdq_down_rwsem);

static void mmc_clk_scaling_start_busy(struct mmc_host *host, bool lock_needed)
{
	struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
@@ -1841,14 +1869,15 @@ int mmc_cmdq_wait_for_dcmd(struct mmc_host *host,
	struct mmc_command *cmd = mrq->cmd;
	int err = 0;

	init_completion(&mrq->completion);
	mrq->done = mmc_cmdq_dcmd_req_done;
	err = mmc_cmdq_start_req(host, cmdq_req);
	if (err)
		return err;

	mmc_cmdq_up_rwsem(host);
	wait_for_completion_io(&mrq->completion);
	if (cmd->error) {
	err = mmc_cmdq_down_rwsem(host, mrq->req);
	if (err || cmd->error) {
		pr_err("%s: DCMD %d failed with err %d\n",
				mmc_hostname(host), cmd->opcode,
				cmd->error);
@@ -3717,7 +3746,7 @@ static int mmc_cmdq_send_erase_cmd(struct mmc_cmdq_req *cmdq_req,
	if (err) {
		pr_err("mmc_erase: group start error %d, status %#x\n",
				err, cmd->resp[0]);
		return -EIO;
		return err;
	}
	return 0;
}
+65 −14
Original line number Diff line number Diff line
@@ -864,6 +864,33 @@ static int cmdq_request(struct mmc_host *mmc, struct mmc_request *mrq)
	return err;
}

/*
 * cmdq_get_first_valid_tag - pick the first tag pending in the doorbell.
 * @cq_host: command-queue host whose CQTDBR register is consulted.
 *
 * Reads CQTDBR and returns the lowest tag whose doorbell bit is still
 * set. If no bit is set, the error interrupt was spurious (or forced),
 * so the halt state is cleared and -EINVAL is returned.
 *
 * Fix: @tag was a u32 printed with %d and implicitly converted to the
 * int return value; declare it as int so the type matches both the
 * format specifier and the return type.
 */
static int cmdq_get_first_valid_tag(struct cmdq_host *cq_host)
{
	u32 dbr_set;
	int tag;

	dbr_set = cmdq_readl(cq_host, CQTDBR);
	if (!dbr_set) {
		pr_err("%s: spurious/force error interrupt\n",
				mmc_hostname(cq_host->mmc));
		/* Recover: un-halt the controller and carry on. */
		cmdq_halt_poll(cq_host->mmc, false);
		mmc_host_clr_halt(cq_host->mmc);
		return -EINVAL;
	}

	/* ffs() is 1-based; convert to a 0-based tag number. */
	tag = ffs(dbr_set) - 1;
	pr_err("%s: error tag selected: tag = %d\n",
		mmc_hostname(cq_host->mmc), tag);
	return tag;
}

/*
 * cmdq_is_valid_tag - check whether @tag maps to an active request.
 * @mmc: host whose cmdq context is consulted.
 * @tag: tag number reported by the controller (e.g. via CQTERRI).
 *
 * A tag is valid when it is the DCMD slot or its bit is set in
 * data_active_reqs.
 *
 * Fix: shift with 1UL instead of the signed int literal 1 — "1 << 31"
 * is undefined behaviour and an int-width shift would also truncate
 * against the wider data_active_reqs bitmap.
 */
static bool cmdq_is_valid_tag(struct mmc_host *mmc, unsigned int tag)
{
	struct mmc_cmdq_context_info *ctx_info = &mmc->cmdq_ctx;

	return (ctx_info->data_active_reqs & (1UL << tag)) ||
		tag == DCMD_SLOT;
}

static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag)
{
	struct mmc_request *mrq;
@@ -899,7 +926,7 @@ static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag)
	mrq->done(mrq);
}

irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
irqreturn_t cmdq_irq(struct mmc_host *mmc, int err, bool is_cmd_err)
{
	u32 status;
	unsigned long tag = 0, comp_status;
@@ -961,18 +988,10 @@ irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
			 *   have caused such error, so check for any first
			 *   bit set in doorbell and proceed with an error.
			 */
			dbr_set = cmdq_readl(cq_host, CQTDBR);
			if (!dbr_set) {
				pr_err("%s: spurious/force error interrupt\n",
						mmc_hostname(mmc));
				cmdq_halt_poll(mmc, false);
				mmc_host_clr_halt(mmc);
				return IRQ_HANDLED;
			}
			tag = cmdq_get_first_valid_tag(cq_host);
			if (tag == -EINVAL)
				goto hac;

			tag = ffs(dbr_set) - 1;
			pr_err("%s: error tag selected: tag = %lu\n",
					mmc_hostname(mmc), tag);
			mrq = get_req_by_tag(cq_host, tag);
			if (mrq->data)
				mrq->data->error = err;
@@ -987,10 +1006,24 @@ irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
			goto skip_cqterri;
		}

		if (err_info & CQ_RMEFV) {
		if (is_cmd_err && (err_info & CQ_RMEFV)) {
			tag = GET_CMD_ERR_TAG(err_info);
			pr_err("%s: CMD err tag: %lu\n", __func__, tag);

			/*
			 * In some cases CQTERRI is not providing reliable tag
			 * info. If the tag is not valid, complete the request
			 * with any valid tag so that all tags will get
			 * requeued.
			 */
			if (!cmdq_is_valid_tag(mmc, tag)) {
				pr_err("%s: CMD err tag is invalid: %lu\n",
						__func__, tag);
				tag = cmdq_get_first_valid_tag(cq_host);
				if (tag == -EINVAL)
					goto hac;
			}

			mrq = get_req_by_tag(cq_host, tag);
			/* CMD44/45/46/47 will not have a valid cmd */
			if (mrq->cmd)
@@ -1000,8 +1033,26 @@ irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
		} else {
			tag = GET_DAT_ERR_TAG(err_info);
			pr_err("%s: Dat err  tag: %lu\n", __func__, tag);

			/*
			 * In some cases CQTERRI is not providing reliable tag
			 * info. If the tag is not valid, complete the request
			 * with any valid tag so that all tags will get
			 * requeued.
			 */
			if (!cmdq_is_valid_tag(mmc, tag)) {
				pr_err("%s: CMD err tag is invalid: %lu\n",
						__func__, tag);
				tag = cmdq_get_first_valid_tag(cq_host);
				if (tag == -EINVAL)
					goto hac;
			}
			mrq = get_req_by_tag(cq_host, tag);

			if (mrq->data)
				mrq->data->error = err;
			else
				mrq->cmd->error = err;
		}

skip_cqterri:
+2 −2
Original line number Diff line number Diff line
/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -244,7 +244,7 @@ static inline u32 cmdq_readl(struct cmdq_host *host, int reg)
		return readl_relaxed(host->mmio + reg);
}

extern irqreturn_t cmdq_irq(struct mmc_host *mmc, int err);
extern irqreturn_t cmdq_irq(struct mmc_host *mmc, int err, bool is_cmd_err);
extern int cmdq_init(struct cmdq_host *cq_host, struct mmc_host *mmc,
		     bool dma64);
extern struct cmdq_host *cmdq_pltfm_init(struct platform_device *pdev);
Loading