Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5325b9fb authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "mmc: cmdq: add clock scaling for CMDQ mode"

parents 52e2f335 dcdc7a57
Loading
Loading
Loading
Loading
+20 −0
Original line number Original line Diff line number Diff line
@@ -2753,6 +2753,12 @@ static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
	struct mmc_cmdq_req *mc_rq;
	struct mmc_cmdq_req *mc_rq;
	int ret = 0;
	int ret = 0;


	if (host->clk_scaling.enable) {
		mmc_deferred_scaling(host);
		mmc_cmdq_clk_scaling_start_busy(host, true);
		BUG_ON(test_and_set_bit(req->tag,
			&host->cmdq_ctx.data_active_reqs));
	}
	BUG_ON((req->tag < 0) || (req->tag > card->ext_csd.cmdq_depth));
	BUG_ON((req->tag < 0) || (req->tag > card->ext_csd.cmdq_depth));
	BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
	BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));


@@ -2967,6 +2973,7 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
	struct mmc_cmdq_req *cmdq_req = &mq_rq->cmdq_req;
	struct mmc_cmdq_req *cmdq_req = &mq_rq->cmdq_req;
	struct mmc_queue *mq = (struct mmc_queue *)rq->q->queuedata;
	struct mmc_queue *mq = (struct mmc_queue *)rq->q->queuedata;
	int err = 0;
	int err = 0;
	bool is_dcmd = false;


	if (mrq->cmd && mrq->cmd->error)
	if (mrq->cmd && mrq->cmd->error)
		err = mrq->cmd->error;
		err = mrq->cmd->error;
@@ -2977,6 +2984,14 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
	BUG_ON(!test_and_clear_bit(cmdq_req->tag,
	BUG_ON(!test_and_clear_bit(cmdq_req->tag,
				   &ctx_info->active_reqs));
				   &ctx_info->active_reqs));


	if (host->clk_scaling.enable) {
		if (cmdq_req->cmdq_req_flags & DCMD)
			is_dcmd = true;
		else
			BUG_ON(!test_and_clear_bit(cmdq_req->tag,
				 &ctx_info->data_active_reqs));
	}

	mmc_cmdq_post_req(host, mrq, err);
	mmc_cmdq_post_req(host, mrq, err);
	if (err) {
	if (err) {
		pr_err("%s: %s: txfr error: %d\n", mmc_hostname(mrq->host),
		pr_err("%s: %s: txfr error: %d\n", mmc_hostname(mrq->host),
@@ -3001,11 +3016,16 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
	blk_end_request(rq, err, cmdq_req->data.bytes_xfered);
	blk_end_request(rq, err, cmdq_req->data.bytes_xfered);


out:
out:

	mmc_cmdq_clk_scaling_stop_busy(host, true, is_dcmd);
	if (!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state) &&
	if (!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state) &&
			test_and_clear_bit(0, &ctx_info->req_starved))
			test_and_clear_bit(0, &ctx_info->req_starved))
		blk_run_queue(mq->queue);
		blk_run_queue(mq->queue);


	mmc_put_card(host->card);
	mmc_put_card(host->card);
	if (!ctx_info->active_reqs)
		wake_up_interruptible(&host->cmdq_ctx.queue_empty_wq);

	if (blk_queue_stopped(mq->queue) && !ctx_info->active_reqs)
	if (blk_queue_stopped(mq->queue) && !ctx_info->active_reqs)
		complete(&mq->cmdq_shutdown_complete);
		complete(&mq->cmdq_shutdown_complete);
	return;
	return;
+1 −0
Original line number Original line Diff line number Diff line
@@ -607,6 +607,7 @@ int mmc_cmdq_init(struct mmc_queue *mq, struct mmc_card *card)
		goto out;
		goto out;
	}
	}


	init_waitqueue_head(&card->host->cmdq_ctx.queue_empty_wq);
	mq->mqrq_cmdq = kzalloc(
	mq->mqrq_cmdq = kzalloc(
			sizeof(struct mmc_queue_req) * q_depth, GFP_KERNEL);
			sizeof(struct mmc_queue_req) * q_depth, GFP_KERNEL);
	if (!mq->mqrq_cmdq) {
	if (!mq->mqrq_cmdq) {
+136 −3
Original line number Original line Diff line number Diff line
@@ -185,6 +185,81 @@ out:
		spin_unlock_bh(&clk_scaling->lock);
		spin_unlock_bh(&clk_scaling->lock);
}
}


/**
 * mmc_cmdq_clk_scaling_start_busy() - start the busy timer for CMDQ
 * data requests
 * @host: pointer to mmc host structure
 * @lock_needed: when true, take the clk_scaling spinlock around the update
 *
 * Records the start of a busy period for clock-scaling statistics. The
 * timer is only (re)armed when it is not already running and no DCMD is
 * currently marked active in the cmdq context.
 */
void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host,
	bool lock_needed)
{
	bool dcmd_active;

	if (!host->clk_scaling.enable)
		return;

	if (lock_needed)
		spin_lock_bh(&host->clk_scaling.lock);

	/* test_bit() is a pure read, safe to evaluate unconditionally */
	dcmd_active = test_bit(CMDQ_STATE_DCMD_ACTIVE,
			&host->cmdq_ctx.curr_state);

	if (!host->clk_scaling.is_busy_started && !dcmd_active) {
		host->clk_scaling.start_busy = ktime_get();
		host->clk_scaling.is_busy_started = true;
	}

	if (lock_needed)
		spin_unlock_bh(&host->clk_scaling.lock);
}
EXPORT_SYMBOL(mmc_cmdq_clk_scaling_start_busy);

/**
 * mmc_cmdq_clk_scaling_stop_busy() - stop busy timer for last data requests
 * @host: pointer to mmc host structure
 * @lock_needed: flag indicating if the clk_scaling spinlock must be taken here
 * @is_cmdq_dcmd: true when the completing request is a DCMD (no data payload)
 *
 * This function stops the busy timer in case it is the last data request.
 * In case the current request is not the last one, the busy time till
 * now will be accumulated and the counter will be restarted.
 *
 * For a DCMD completion no busy time is accumulated; instead the timer is
 * (re)started if data requests are still in flight.
 */
void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
	bool lock_needed, bool is_cmdq_dcmd)
{
	if (!host->clk_scaling.enable)
		return;

	if (lock_needed)
		spin_lock_bh(&host->clk_scaling.lock);

	/*
	 *  For CQ mode: In completion of DCMD request, start busy time in
	 *  case of pending data requests
	 */
	if (is_cmdq_dcmd) {
		if (host->cmdq_ctx.data_active_reqs) {
			host->clk_scaling.is_busy_started = true;
			host->clk_scaling.start_busy = ktime_get();
		}
		goto out;
	}

	/*
	 * NOTE(review): start_busy is read here without checking
	 * is_busy_started first — assumes every non-DCMD completion was
	 * preceded by a matching start_busy call; confirm against callers.
	 */
	host->clk_scaling.total_busy_time_us +=
		ktime_to_us(ktime_sub(ktime_get(),
			host->clk_scaling.start_busy));

	/* More data requests in flight: restart the window, else go idle. */
	if (host->cmdq_ctx.data_active_reqs) {
		host->clk_scaling.is_busy_started = true;
		host->clk_scaling.start_busy = ktime_get();
	} else {
		host->clk_scaling.is_busy_started = false;
	}
out:
	if (lock_needed)
		spin_unlock_bh(&host->clk_scaling.lock);

}
EXPORT_SYMBOL(mmc_cmdq_clk_scaling_stop_busy);

/**
/**
 * mmc_disable_devfreq_clk_scaling() - Disable clock scaling
 * mmc_disable_devfreq_clk_scaling() - Disable clock scaling
 * @host: pointer to mmc host structure
 * @host: pointer to mmc host structure
@@ -242,9 +317,15 @@ static int mmc_devfreq_get_dev_status(struct device *dev,
	/* accumulate the busy time of ongoing work */
	/* accumulate the busy time of ongoing work */
	memset(status, 0, sizeof(*status));
	memset(status, 0, sizeof(*status));
	if (clk_scaling->is_busy_started) {
	if (clk_scaling->is_busy_started) {
		if (mmc_card_cmdq(host->card)) {
			/* the "busy-timer" will be restarted in case there
			 * are pending data requests */
			mmc_cmdq_clk_scaling_stop_busy(host, false, false);
		} else {
			mmc_clk_scaling_stop_busy(host, false);
			mmc_clk_scaling_stop_busy(host, false);
			mmc_clk_scaling_start_busy(host, false);
			mmc_clk_scaling_start_busy(host, false);
		}
		}
	}


	status->busy_time = clk_scaling->total_busy_time_us;
	status->busy_time = clk_scaling->total_busy_time_us;
	status->total_time = ktime_to_us(ktime_sub(ktime_get(),
	status->total_time = ktime_to_us(ktime_sub(ktime_get(),
@@ -286,10 +367,35 @@ static bool mmc_is_vaild_state_for_clk_scaling(struct mmc_host *host)
	return R1_CURRENT_STATE(status) == R1_STATE_TRAN;
	return R1_CURRENT_STATE(status) == R1_STATE_TRAN;
}
}


/*
 * mmc_cmdq_halt_on_empty_queue() - wait for the cmdq queue to drain,
 * then halt the cmdq engine.
 *
 * Returns 0 on success, -ERESTARTSYS if the wait was interrupted by a
 * signal, -EPERM if requests are unexpectedly still active after the
 * wait, or the error from mmc_cmdq_halt().
 */
static int mmc_cmdq_halt_on_empty_queue(struct mmc_host *host)
{
	int err;

	err = wait_event_interruptible(host->cmdq_ctx.queue_empty_wq,
				!host->cmdq_ctx.active_reqs);
	/*
	 * Fix: the original discarded this return value, silently
	 * proceeding after a signal interrupted the wait.
	 */
	if (err) {
		pr_err("%s: %s: interrupted waiting for empty queue (%d)\n",
			mmc_hostname(host), __func__, err);
		return err;
	}

	if (host->cmdq_ctx.active_reqs) {
		pr_err("%s: %s: unexpected active requests (%lu)\n",
			mmc_hostname(host), __func__,
			host->cmdq_ctx.active_reqs);
		return -EPERM;
	}

	err = mmc_cmdq_halt(host, true);
	if (err)
		pr_err("%s: %s: mmc_cmdq_halt failed (%d)\n",
		       mmc_hostname(host), __func__, err);

	return err;
}

int mmc_clk_update_freq(struct mmc_host *host,
int mmc_clk_update_freq(struct mmc_host *host,
		unsigned long freq, enum mmc_load state)
		unsigned long freq, enum mmc_load state)
{
{
	int err = 0;
	int err = 0;
	bool cmdq_mode;


	if (!host) {
	if (!host) {
		pr_err("bad host parameter\n");
		pr_err("bad host parameter\n");
@@ -297,6 +403,9 @@ int mmc_clk_update_freq(struct mmc_host *host,
		return -EINVAL;
		return -EINVAL;
	}
	}


	mmc_host_clk_hold(host);
	cmdq_mode = mmc_card_cmdq(host->card);

	/* make sure the card supports the frequency we want */
	/* make sure the card supports the frequency we want */
	if (unlikely(freq > host->card->clk_scaling_highest)) {
	if (unlikely(freq > host->card->clk_scaling_highest)) {
		freq = host->card->clk_scaling_highest;
		freq = host->card->clk_scaling_highest;
@@ -322,6 +431,15 @@ int mmc_clk_update_freq(struct mmc_host *host,
	}
	}


	if (freq != host->clk_scaling.curr_freq) {
	if (freq != host->clk_scaling.curr_freq) {
		if (cmdq_mode) {
			err = mmc_cmdq_halt_on_empty_queue(host);
			if (err) {
				pr_err("%s: %s: failed halting queue (%d)\n",
					mmc_hostname(host), __func__, err);
				goto error;
			}
		}

		if (!mmc_is_vaild_state_for_clk_scaling(host)) {
		if (!mmc_is_vaild_state_for_clk_scaling(host)) {
			pr_debug("%s: invalid state for clock scaling - skipping",
			pr_debug("%s: invalid state for clock scaling - skipping",
				mmc_hostname(host));
				mmc_hostname(host));
@@ -335,6 +453,12 @@ int mmc_clk_update_freq(struct mmc_host *host,
		else
		else
			pr_err("%s: %s: failed (%d) at freq=%lu\n",
			pr_err("%s: %s: failed (%d) at freq=%lu\n",
				mmc_hostname(host), __func__, err, freq);
				mmc_hostname(host), __func__, err, freq);

		if (cmdq_mode) {
			if (mmc_cmdq_halt(host, false))
				pr_err("%s: %s: cmdq unhalt failed\n",
				mmc_hostname(host), __func__);
		}
	}
	}
error:
error:
	if (err) {
	if (err) {
@@ -346,6 +470,7 @@ error:
					mmc_hostname(host), __func__);
					mmc_hostname(host), __func__);
	}
	}
out:
out:
	mmc_host_clk_release(host);
	return err;
	return err;
}
}
EXPORT_SYMBOL(mmc_clk_update_freq);
EXPORT_SYMBOL(mmc_clk_update_freq);
@@ -418,7 +543,14 @@ out:
	return err;
	return err;
}
}


static void mmc_deferred_scaling(struct mmc_host *host)
/**
 * mmc_deferred_scaling() - scale clocks from data path (mmc thread context)
 * @host: pointer to mmc host structure
 *
 * This function does clock scaling in case "need_freq_change" flag was set
 * by the clock scaling logic.
 */
void mmc_deferred_scaling(struct mmc_host *host)
{
{
	unsigned long target_freq;
	unsigned long target_freq;
	int err;
	int err;
@@ -456,6 +588,7 @@ static void mmc_deferred_scaling(struct mmc_host *host)
	host->clk_scaling.clk_scaling_in_progress = false;
	host->clk_scaling.clk_scaling_in_progress = false;
	atomic_dec(&host->clk_scaling.devfreq_abort);
	atomic_dec(&host->clk_scaling.devfreq_abort);
}
}
EXPORT_SYMBOL(mmc_deferred_scaling);


static int mmc_devfreq_create_freq_table(struct mmc_host *host)
static int mmc_devfreq_create_freq_table(struct mmc_host *host)
{
{
+6 −0
Original line number Original line Diff line number Diff line
@@ -210,6 +210,12 @@ extern int mmc_detect_card_removed(struct mmc_host *host);


extern void mmc_blk_init_bkops_statistics(struct mmc_card *card);
extern void mmc_blk_init_bkops_statistics(struct mmc_card *card);


extern void mmc_deferred_scaling(struct mmc_host *host);
extern void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host,
	bool lock_needed);
extern void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
	bool lock_needed, bool is_cmdq_dcmd);

/**
/**
 *	mmc_claim_host - exclusively claim a host
 *	mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@host: mmc host to claim
+5 −0
Original line number Original line Diff line number Diff line
@@ -235,19 +235,24 @@ struct mmc_slot {
/**
/**
 * mmc_cmdq_context_info - describes the contexts of cmdq
 * mmc_cmdq_context_info - describes the contexts of cmdq
 * @active_reqs		requests being processed
 * @active_reqs		requests being processed
 * @data_active_reqs	data requests being processed
 * @curr_state		state of cmdq engine
 * @curr_state		state of cmdq engine
 * @req_starved		completion should invoke the request_fn since
 * @req_starved		completion should invoke the request_fn since
 *			no tags were available
 *			no tags were available
 * @cmdq_ctx_lock	acquire this before accessing this structure
 * @cmdq_ctx_lock	acquire this before accessing this structure
 * @queue_empty_wq	workqueue for waiting for all
 *			the outstanding requests to be completed
 */
 */
struct mmc_cmdq_context_info {
struct mmc_cmdq_context_info {
	unsigned long	active_reqs; /* in-flight requests */
	unsigned long	active_reqs; /* in-flight requests */
	unsigned long	data_active_reqs; /* in-flight data requests */
	unsigned long	curr_state;
	unsigned long	curr_state;
#define	CMDQ_STATE_ERR 0
#define	CMDQ_STATE_ERR 0
#define	CMDQ_STATE_DCMD_ACTIVE 1
#define	CMDQ_STATE_DCMD_ACTIVE 1
#define	CMDQ_STATE_HALT 2
#define	CMDQ_STATE_HALT 2
	/* no free tag available */
	/* no free tag available */
	unsigned long	req_starved;
	unsigned long	req_starved;
	wait_queue_head_t	queue_empty_wq;
};
};


/**
/**