Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7fdacd50 authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "mmc: cmdq: Add timeout in case of mmc_cmdq_halt_on_empty_queue()"

parents 2329a96c 6cbf39b0
Loading
Loading
Loading
Loading
+117 −36
Original line number Diff line number Diff line
@@ -1214,7 +1214,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
	mmc_get_card(card);

	if (mmc_card_cmdq(card)) {
		err = mmc_cmdq_halt_on_empty_queue(card->host);
		err = mmc_cmdq_halt_on_empty_queue(card->host, 0);
		if (err) {
			pr_err("%s: halt failed while doing %s err (%d)\n",
					mmc_hostname(card->host),
@@ -1867,11 +1867,6 @@ static struct mmc_cmdq_req *mmc_blk_cmdq_prep_discard_req(struct mmc_queue *mq,
	struct mmc_cmdq_req *cmdq_req;
	struct mmc_queue_req *active_mqrq;

	BUG_ON(req->tag > card->ext_csd.cmdq_depth);
	BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));

	set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);

	active_mqrq = &mq->mqrq_cmdq[req->tag];
	active_mqrq->req = req;

@@ -1879,6 +1874,17 @@ static struct mmc_cmdq_req *mmc_blk_cmdq_prep_discard_req(struct mmc_queue *mq,
	cmdq_req->cmdq_req_flags |= QBR;
	cmdq_req->mrq.cmd = &cmdq_req->cmd;
	cmdq_req->tag = req->tag;

	/*
	 * To avoid potential race condition with the error handler work,
	 * do the following:
	 * 1. set init_completion() only once
	 * 2. set the CMDQ_STATE_DCMD_ACTIVE only after its tag is set
	 */
	init_completion(&cmdq_req->mrq.completion);
	WARN_ON(req->tag > card->ext_csd.cmdq_depth);
	WARN_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
	set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
	return cmdq_req;
}

@@ -1922,8 +1928,21 @@ static int mmc_blk_cmdq_issue_discard_rq(struct mmc_queue *mq,
	}
	err = mmc_cmdq_erase(cmdq_req, card, from, nr, arg);
clear_dcmd:
	/*
	 * If some other request got an error while there is a DCMD request
	 * in the command queue, then err will be updated with -EAGAIN by the
	 * error handler, which indicates that caller must not call
	 * blk_complete_request() and let the request be handled by the error
	 * handler. In all other cases, the caller only must call
	 * blk_complete_request().
	 */
	if (err != -EAGAIN) {
		mmc_host_clk_hold(card->host);
		blk_complete_request(req);
	} else {
		pr_err("%s: err(%d) handled by cmdq-error handler\n",
			__func__, err);
	}
out:
	return err ? 1 : 0;
}
@@ -2028,8 +2047,13 @@ static int mmc_blk_cmdq_issue_secdiscard_rq(struct mmc_queue *mq,
				MMC_SECURE_TRIM2_ARG);
	}
clear_dcmd:
	if (err != -EAGAIN) {
		mmc_host_clk_hold(card->host);
		blk_complete_request(req);
	} else {
		pr_err("%s: err(%d) handled by cmdq-error handler\n",
			__func__, err);
	}
out:
	return err ? 1 : 0;
}
@@ -3212,8 +3236,14 @@ static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
	struct mmc_cmdq_req *mc_rq;
	u8 active_small_sector_read = 0;
	int ret = 0;
	unsigned long timeout_ms = 10000; /* 10 sec safe timeout */

	mmc_cmdq_up_rwsem(host);
	mmc_deferred_scaling(host, timeout_ms);
	ret = mmc_cmdq_down_rwsem(host, req);
	if (ret)
		return ret;

	mmc_deferred_scaling(host);
	mmc_cmdq_clk_scaling_start_busy(host, true);

	BUG_ON((req->tag < 0) || (req->tag > card->ext_csd.cmdq_depth));
@@ -3246,9 +3276,18 @@ static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
	 * empty faster and we will be able to scale up to Nominal frequency
	 * when needed.
	 */
	if (!ret && (host->clk_scaling.state == MMC_LOAD_LOW))
		wait_event_interruptible(ctx->queue_empty_wq,
					(!ctx->active_reqs));

	if (!ret && (host->clk_scaling.state == MMC_LOAD_LOW)) {

		ret = wait_event_interruptible_timeout(ctx->queue_empty_wq,
				(!ctx->active_reqs &&
				 !test_bit(CMDQ_STATE_ERR, &ctx->curr_state)),
				msecs_to_jiffies(5000));
		if (!ret)
			pr_err("%s: queue_empty_wq timeout case? ret = (%d)\n",
				__func__, ret);
		ret = 0;
	}

	if (ret) {
		/* clear pending request */
@@ -3339,20 +3378,17 @@ static void mmc_blk_cmdq_reset(struct mmc_host *host, bool clear_all)
}

/**
 * is_cmdq_dcmd_req - Checks if tag belongs to DCMD request.
 * get_cmdq_req_by_tag - returns cmdq_rq based on tag.
 * @q:		request_queue pointer.
 * @tag:	tag number of request to check.
 *
 * This function checks if the request with tag number "tag"
 * is a DCMD request or not based on cmdq_req_flags set.
 *
 * returns true if DCMD req, otherwise false.
 */
static bool is_cmdq_dcmd_req(struct request_queue *q, int tag)
static struct mmc_cmdq_req *get_cmdq_req_by_tag(struct request_queue *q,
						int tag)
{
	struct request *req;
	struct mmc_queue_req *mq_rq;
	struct mmc_cmdq_req *cmdq_req;
	struct mmc_cmdq_req *cmdq_req = NULL;

	req = blk_queue_find_tag(q, tag);
	if (WARN_ON(!req))
@@ -3361,9 +3397,8 @@ static bool is_cmdq_dcmd_req(struct request_queue *q, int tag)
	if (WARN_ON(!mq_rq))
		goto out;
	cmdq_req = &(mq_rq->cmdq_req);
	return (cmdq_req->cmdq_req_flags & DCMD);
out:
	return -ENOENT;
	return cmdq_req;
}

/**
@@ -3383,7 +3418,9 @@ static void mmc_blk_cmdq_reset_all(struct mmc_host *host, int err)
	struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
	struct request_queue *q;
	int itag = 0;
	int ret = 0;
	struct mmc_cmdq_req *cmdq_req = NULL;
	struct mmc_request *dcmd_mrq;
	bool is_err_mrq_dcmd = false;

	if (WARN_ON(!mrq))
		return;
@@ -3399,18 +3436,31 @@ static void mmc_blk_cmdq_reset_all(struct mmc_host *host, int err)

	mmc_blk_cmdq_reset(host, false);

	if (mrq->cmdq_req->cmdq_req_flags & DCMD)
		is_err_mrq_dcmd = true;

	for_each_set_bit(itag, &ctx_info->active_reqs,
			host->num_cq_slots) {
		ret = is_cmdq_dcmd_req(q, itag);
		if (WARN_ON(ret == -ENOENT))
		cmdq_req = get_cmdq_req_by_tag(q, itag);
		if (WARN_ON(!cmdq_req))
			continue;
		if (!ret) {
		if (!(cmdq_req->cmdq_req_flags & DCMD)) {
			WARN_ON(!test_and_clear_bit(itag,
				 &ctx_info->data_active_reqs));
			mmc_cmdq_post_req(host, itag, err);
		} else {
			clear_bit(CMDQ_STATE_DCMD_ACTIVE,
					&ctx_info->curr_state);
			dcmd_mrq = &cmdq_req->mrq;
			WARN_ON(!test_and_clear_bit(CMDQ_STATE_DCMD_ACTIVE,
					&ctx_info->curr_state));
			pr_debug("%s: cmd(%u), req_op(%llu)\n", __func__,
				 dcmd_mrq->cmd->opcode, req_op(dcmd_mrq->req));
			if (!is_err_mrq_dcmd && !dcmd_mrq->cmd->error &&
				(req_op(dcmd_mrq->req) == REQ_OP_SECURE_ERASE ||
				 req_op(dcmd_mrq->req) == REQ_OP_DISCARD)) {
				dcmd_mrq->cmd->error = -EAGAIN;
				complete(&dcmd_mrq->completion);
			}

		}
		WARN_ON(!test_and_clear_bit(itag,
					&ctx_info->active_reqs));
@@ -3538,6 +3588,7 @@ static void mmc_blk_cmdq_err(struct mmc_queue *mq)
	if (WARN_ON(!mrq))
		return;

	down_write(&ctx_info->err_rwsem);
	q = mrq->req->q;
	err = mmc_cmdq_halt(host, true);
	if (err) {
@@ -3590,6 +3641,24 @@ static void mmc_blk_cmdq_err(struct mmc_queue *mq)
	host->err_mrq = NULL;
	clear_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state);
	WARN_ON(!test_and_clear_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));

#ifdef CONFIG_MMC_CLKGATE
	pr_err("%s: clk-rqs(%d), claim-cnt(%d), claimed(%d), claimer(%s)\n",
		__func__, host->clk_requests, host->claim_cnt, host->claimed,
		host->claimer->comm);
#else
	pr_err("%s: claim-cnt(%d), claimed(%d), claimer(%s)\n", __func__,
			host->claim_cnt, host->claimed, host->claimer->comm);
#endif
	sched_show_task(mq->thread);
	if (host->claimed && host->claimer)
		sched_show_task(host->claimer);
#ifdef CONFIG_MMC_CLKGATE
	WARN_ON(host->clk_requests < 0);
#endif
	WARN_ON(host->claim_cnt < 0);

	up_write(&ctx_info->err_rwsem);
	wake_up(&ctx_info->wait);
}

@@ -3604,6 +3673,16 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
	struct mmc_queue *mq = (struct mmc_queue *)rq->q->queuedata;
	int err = 0;
	bool is_dcmd = false;
	bool err_rwsem = false;

	if (down_read_trylock(&ctx_info->err_rwsem)) {
		err_rwsem = true;
	} else {
		pr_err("%s: err_rwsem lock failed to acquire => err handler active\n",
		    __func__);
		WARN_ON_ONCE(!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
		goto out;
	}

	if (mrq->cmd && mrq->cmd->error)
		err = mrq->cmd->error;
@@ -3625,12 +3704,6 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
		}
		goto out;
	}
	/*
	 * In case of error CMDQ is expected to be either in halted
	 * or disable state so cannot receive any completion of
	 * other requests.
	 */
	WARN_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));

	/* clear pending request */
	BUG_ON(!test_and_clear_bit(cmdq_req->tag,
@@ -3664,9 +3737,10 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
out:

	mmc_cmdq_clk_scaling_stop_busy(host, true, is_dcmd);
	if (!(err || cmdq_req->resp_err)) {
	if (err_rwsem && !(err || cmdq_req->resp_err)) {
		mmc_host_clk_release(host);
		wake_up(&ctx_info->wait);
		host->last_completed_rq_time = ktime_get();
		mmc_put_card(host->card);
	}

@@ -3676,6 +3750,8 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
	if (blk_queue_stopped(mq->queue) && !ctx_info->active_reqs)
		complete(&mq->cmdq_shutdown_complete);

	if (err_rwsem)
		up_read(&ctx_info->err_rwsem);
	return;
}

@@ -4056,6 +4132,7 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
		if (mmc_req_is_special(req) &&
		    (card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) &&
		    ctx->active_small_sector_read_reqs) {
			mmc_cmdq_up_rwsem(host);
			ret = wait_event_interruptible(ctx->queue_empty_wq,
						      !ctx->active_reqs);
			if (ret) {
@@ -4064,6 +4141,10 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
					__func__, ret);
				BUG_ON(1);
			}
			ret = mmc_cmdq_down_rwsem(host, req);
			if (ret)
				return ret;

			/* clear the counter now */
			ctx->active_small_sector_read_reqs = 0;
			/*
+8 −0
Original line number Diff line number Diff line
@@ -133,7 +133,14 @@ static int mmc_cmdq_thread(void *d)
		if (kthread_should_stop())
			break;

		ret = mmc_cmdq_down_rwsem(host, mq->cmdq_req_peeked);
		if (ret) {
			mmc_cmdq_up_rwsem(host);
			continue;
		}
		ret = mq->cmdq_issue_fn(mq, mq->cmdq_req_peeked);
		mmc_cmdq_up_rwsem(host);

		/*
		 * Don't requeue if issue_fn fails.
		 * Recovery will be come by completion softirq
@@ -645,6 +652,7 @@ int mmc_cmdq_init(struct mmc_queue *mq, struct mmc_card *card)

	init_waitqueue_head(&card->host->cmdq_ctx.queue_empty_wq);
	init_waitqueue_head(&card->host->cmdq_ctx.wait);
	init_rwsem(&card->host->cmdq_ctx.err_rwsem);

	mq->mqrq_cmdq = kzalloc(
			sizeof(struct mmc_queue_req) * q_depth, GFP_KERNEL);
+57 −12
Original line number Diff line number Diff line
@@ -137,6 +137,34 @@ static bool mmc_is_data_request(struct mmc_request *mmc_request)
	}
}

/*
 * mmc_cmdq_up_rwsem - drop the reader side of the cmdq error-handler rwsem.
 * @host: MMC host whose cmdq context owns the semaphore.
 *
 * Counterpart of mmc_cmdq_down_rwsem(); releases the read lock taken by
 * an issue/wait context so the error-handler work may acquire it for write.
 */
void mmc_cmdq_up_rwsem(struct mmc_host *host)
{
	up_read(&host->cmdq_ctx.err_rwsem);
}
EXPORT_SYMBOL(mmc_cmdq_up_rwsem);

/*
 * mmc_cmdq_down_rwsem - take the reader side of the cmdq error-handler rwsem.
 * @host: MMC host whose cmdq context owns the semaphore.
 * @rq:   request the issuing context is about to handle.
 *
 * Returns 0 when @rq is still queued, -EINVAL when it was invalidated.
 * Note: the read lock is held on BOTH return paths; the caller must pair
 * this with mmc_cmdq_up_rwsem() regardless of the return value.
 */
int mmc_cmdq_down_rwsem(struct mmc_host *host, struct request *rq)
{
	struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;

	down_read(&ctx->err_rwsem);
	/*
	 * Guard against a race with the error-handler work: the issuer may
	 * have already called blk_queue_start_tag() when the error handler
	 * ran blk_queue_invalidate_tags(). If REQ_QUEUED has been cleared,
	 * the issuer must not proceed with this request and should re-run
	 * blk_queue_start_tag() on the requeued request instead.
	 */
	return (rq->cmd_flags & REQ_QUEUED) ? 0 : -EINVAL;
}
EXPORT_SYMBOL(mmc_cmdq_down_rwsem);

static void mmc_clk_scaling_start_busy(struct mmc_host *host, bool lock_needed)
{
	struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
@@ -345,12 +373,23 @@ static bool mmc_is_valid_state_for_clk_scaling(struct mmc_host *host)
	return R1_CURRENT_STATE(status) == R1_STATE_TRAN;
}

int mmc_cmdq_halt_on_empty_queue(struct mmc_host *host)
int mmc_cmdq_halt_on_empty_queue(struct mmc_host *host, unsigned long timeout)
{
	int err = 0;

	if (!timeout) {
		err = wait_event_interruptible(host->cmdq_ctx.queue_empty_wq,
					(!host->cmdq_ctx.active_reqs));
	} else {
		err = wait_event_interruptible_timeout(
				host->cmdq_ctx.queue_empty_wq,
				(!host->cmdq_ctx.active_reqs),
				msecs_to_jiffies(timeout));
		if (!err)
			pr_err("%s: halt_on_empty_queue timeout case: err(%d)\n",
					__func__, err);
	}

	if (host->cmdq_ctx.active_reqs) {
		pr_err("%s: %s: unexpected active requests (%lu)\n",
			mmc_hostname(host), __func__,
@@ -371,7 +410,8 @@ int mmc_cmdq_halt_on_empty_queue(struct mmc_host *host)
EXPORT_SYMBOL(mmc_cmdq_halt_on_empty_queue);

int mmc_clk_update_freq(struct mmc_host *host,
		unsigned long freq, enum mmc_load state)
		unsigned long freq, enum mmc_load state,
		unsigned long timeout)
{
	int err = 0;
	bool cmdq_mode;
@@ -413,7 +453,7 @@ int mmc_clk_update_freq(struct mmc_host *host,
	}

	if (cmdq_mode) {
		err = mmc_cmdq_halt_on_empty_queue(host);
		err = mmc_cmdq_halt_on_empty_queue(host, timeout);
		if (err) {
			pr_err("%s: %s: failed halting queue (%d)\n",
				mmc_hostname(host), __func__, err);
@@ -427,12 +467,16 @@ int mmc_clk_update_freq(struct mmc_host *host,
		goto invalid_state;
	}

	MMC_TRACE(host, "clock scale state %d freq %lu\n",
			state, freq);
	err = host->bus_ops->change_bus_speed(host, &freq);
	if (!err)
		host->clk_scaling.curr_freq = freq;
	else
		pr_err("%s: %s: failed (%d) at freq=%lu\n",
			mmc_hostname(host), __func__, err, freq);
	MMC_TRACE(host, "clock scale state %d freq %lu done with err %d\n",
			state, freq, err);

invalid_state:
	if (cmdq_mode) {
@@ -542,7 +586,7 @@ static int mmc_devfreq_set_target(struct device *dev,
	clk_scaling->need_freq_change = false;

	mmc_host_clk_hold(host);
	err = mmc_clk_update_freq(host, *freq, clk_scaling->state);
	err = mmc_clk_update_freq(host, *freq, clk_scaling->state, 0);
	if (err && err != -EAGAIN) {
		pr_err("%s: clock scale to %lu failed with error %d\n",
			mmc_hostname(host), *freq, err);
@@ -568,7 +612,7 @@ static int mmc_devfreq_set_target(struct device *dev,
 * This function does clock scaling in case "need_freq_change" flag was set
 * by the clock scaling logic.
 */
void mmc_deferred_scaling(struct mmc_host *host)
void mmc_deferred_scaling(struct mmc_host *host, unsigned long timeout)
{
	unsigned long target_freq;
	int err;
@@ -598,7 +642,7 @@ void mmc_deferred_scaling(struct mmc_host *host)
				target_freq, current->comm);

	err = mmc_clk_update_freq(host, target_freq,
		host->clk_scaling.state);
		host->clk_scaling.state, timeout);
	if (err && err != -EAGAIN) {
		pr_err("%s: failed on deferred scale clocks (%d)\n",
			mmc_hostname(host), err);
@@ -1204,7 +1248,7 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
	led_trigger_event(host->led, LED_FULL);

	if (mmc_is_data_request(mrq)) {
		mmc_deferred_scaling(host);
		mmc_deferred_scaling(host, 0);
		mmc_clk_scaling_start_busy(host, true);
	}

@@ -1844,14 +1888,15 @@ int mmc_cmdq_wait_for_dcmd(struct mmc_host *host,
	struct mmc_command *cmd = mrq->cmd;
	int err = 0;

	init_completion(&mrq->completion);
	mrq->done = mmc_cmdq_dcmd_req_done;
	err = mmc_cmdq_start_req(host, cmdq_req);
	if (err)
		return err;

	mmc_cmdq_up_rwsem(host);
	wait_for_completion_io(&mrq->completion);
	if (cmd->error) {
	err = mmc_cmdq_down_rwsem(host, mrq->req);
	if (err || cmd->error) {
		pr_err("%s: DCMD %d failed with err %d\n",
				mmc_hostname(host), cmd->opcode,
				cmd->error);
@@ -3720,7 +3765,7 @@ static int mmc_cmdq_send_erase_cmd(struct mmc_cmdq_req *cmdq_req,
	if (err) {
		pr_err("mmc_erase: group start error %d, status %#x\n",
				err, cmd->resp[0]);
		return -EIO;
		return err;
	}
	return 0;
}
+2 −1
Original line number Diff line number Diff line
@@ -26,7 +26,8 @@ void mmc_init_erase(struct mmc_card *card);
void mmc_set_chip_select(struct mmc_host *host, int mode);
void mmc_set_clock(struct mmc_host *host, unsigned int hz);
int mmc_clk_update_freq(struct mmc_host *host,
		unsigned long freq, enum mmc_load state);
		unsigned long freq, enum mmc_load state,
		unsigned long timeout);
void mmc_gate_clock(struct mmc_host *host);
void mmc_ungate_clock(struct mmc_host *host);
void mmc_set_ungated(struct mmc_host *host);
+3 −3
Original line number Diff line number Diff line
@@ -275,7 +275,7 @@ static int mmc_scale_set(void *data, u64 val)
	mmc_host_clk_hold(host);

	/* change frequency from sysfs manually */
	err = mmc_clk_update_freq(host, val, host->clk_scaling.state);
	err = mmc_clk_update_freq(host, val, host->clk_scaling.state, 0);
	if (err == -EAGAIN)
		err = 0;
	else if (err)
@@ -547,7 +547,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)

	mmc_get_card(card);
	if (mmc_card_cmdq(card)) {
		ret = mmc_cmdq_halt_on_empty_queue(card->host);
		ret = mmc_cmdq_halt_on_empty_queue(card->host, 0);
		if (ret) {
			pr_err("%s: halt failed while doing %s err (%d)\n",
					mmc_hostname(card->host), __func__,
@@ -589,7 +589,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)

	mmc_get_card(card);
	if (mmc_card_cmdq(card)) {
		err = mmc_cmdq_halt_on_empty_queue(card->host);
		err = mmc_cmdq_halt_on_empty_queue(card->host, 0);
		if (err) {
			pr_err("%s: halt failed while doing %s err (%d)\n",
					mmc_hostname(card->host), __func__,
Loading