Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 674da820 authored by Ram Prakash Gupta
Browse files

mmc: Enable clk scaling for cqe



Enable clk scaling for cqe.

Change-Id: I99e87a808e4d7663114736e45614a8082dda0eb1
Signed-off-by: Ram Prakash Gupta <rampraka@codeaurora.org>
parent 6998de7d
Loading
Loading
Loading
Loading
+21 −0
Original line number Diff line number Diff line
@@ -1458,6 +1458,9 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
	spin_lock_irqsave(&mq->lock, flags);

	mq->in_flight[issue_type] -= 1;
#if defined(CONFIG_SDC_QTI)
	atomic_dec(&host->active_reqs);
#endif

	put_card = (mmc_tot_in_flight(mq) == 0);

@@ -1467,6 +1470,10 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)

	if (!mq->cqe_busy)
		blk_mq_run_hw_queues(q, true);
#if defined(CONFIG_SDC_QTI)
	mmc_cqe_clk_scaling_stop_busy(host, true,
				(issue_type == MMC_ISSUE_DCMD));
#endif

	if (put_card)
		mmc_put_card(mq->card, &mq->ctx);
@@ -1563,13 +1570,27 @@ static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_host *host = mq->card->host;
#if defined(CONFIG_SDC_QTI)
	int err;
#endif

	if (host->hsq_enabled)
		return mmc_blk_hsq_issue_rw_rq(mq, req);

	mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);
#if defined(CONFIG_SDC_QTI)
	mmc_deferred_scaling(mq->card->host);
	mmc_cqe_clk_scaling_start_busy(mq, mq->card->host, true);

	err =  mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);

	if (err)
		mmc_cqe_clk_scaling_stop_busy(mq->card->host, true, false);

	return err;
#else
	return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
#endif
}

static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+113 −5
Original line number Diff line number Diff line
@@ -176,6 +176,89 @@ static void mmc_clk_scaling_stop_busy(struct mmc_host *host, bool lock_needed)
		spin_unlock_irqrestore(&clk_scaling->lock, flags);
}

/**
 * mmc_cqe_clk_scaling_start_busy() - start busy timer for data requests
 * @mq: pointer to the mmc queue structure
 * @host: pointer to mmc host structure
 * @lock_needed: flag indicating if locking is needed
 *
 * This function starts the busy timer in case it was not already started.
 */
void mmc_cqe_clk_scaling_start_busy(struct mmc_queue *mq,
			struct mmc_host *host, bool lock_needed)
{
	unsigned long flags;

	/* Clock scaling disabled: nothing to account. */
	if (!host->clk_scaling.enable)
		return;

	if (lock_needed)
		spin_lock_irqsave(&host->clk_scaling.lock, flags);

	/*
	 * Arm the busy timer only once: skip if it is already running, or
	 * if a DCMD is outstanding (mmc_cqe_clk_scaling_stop_busy() restarts
	 * the timer on DCMD completion when data requests are still pending).
	 */
	if (!host->clk_scaling.is_busy_started &&
			!(mq->cqe_busy & MMC_CQE_DCMD_BUSY)) {
		host->clk_scaling.start_busy = ktime_get();
		host->clk_scaling.is_busy_started = true;
	}

	if (lock_needed)
		spin_unlock_irqrestore(&host->clk_scaling.lock, flags);
}
EXPORT_SYMBOL(mmc_cqe_clk_scaling_start_busy);

/**
 * mmc_cqe_clk_scaling_stop_busy() - stop busy timer for last data requests
 * @host: pointer to mmc host structure
 * @lock_needed: flag indicating if locking is needed
 * @is_cqe_dcmd: true when the completed request is a CQE direct command
 *
 * This function stops the busy timer in case it is the last data request.
 * In case the current request is not the last one, the busy time till
 * now will be accumulated and the counter will be restarted.
 */
void mmc_cqe_clk_scaling_stop_busy(struct mmc_host *host,
	bool lock_needed, bool is_cqe_dcmd)
{
	unsigned int cqe_active_reqs = 0;

	if (!host->clk_scaling.enable)
		return;

	/* Snapshot the outstanding-request count before taking the lock. */
	cqe_active_reqs = atomic_read(&host->active_reqs);

	/*
	 * This gets invoked from CQE completion path which is hard IRQ context
	 * So use spin_lock() instead of spin_lock_irqsave()
	 */
	if (lock_needed)
		spin_lock(&host->clk_scaling.lock);

	/*
	 *  For CQ mode: In completion of DCMD request, start busy time in
	 *  case of pending data requests
	 */
	if (is_cqe_dcmd) {
		if (cqe_active_reqs && !host->clk_scaling.is_busy_started) {
			host->clk_scaling.is_busy_started = true;
			host->clk_scaling.start_busy = ktime_get();
		}
		goto out;
	}

	/* Accumulate the busy time elapsed since the timer was (re)started. */
	host->clk_scaling.total_busy_time_us +=
		ktime_to_us(ktime_sub(ktime_get(),
			host->clk_scaling.start_busy));

	/* Restart the timer if data requests are still in flight. */
	if (cqe_active_reqs) {
		host->clk_scaling.is_busy_started = true;
		host->clk_scaling.start_busy = ktime_get();
	} else {
		host->clk_scaling.is_busy_started = false;
	}
out:
	if (lock_needed)
		spin_unlock(&host->clk_scaling.lock);

}
EXPORT_SYMBOL(mmc_cqe_clk_scaling_stop_busy);

/**
 * mmc_can_scale_clk() - Check clock scaling capability
 * @host: pointer to mmc host structure
@@ -215,9 +298,16 @@ static int mmc_devfreq_get_dev_status(struct device *dev,
	/* accumulate the busy time of ongoing work */
	memset(status, 0, sizeof(*status));
	if (clk_scaling->is_busy_started) {
		if (host->cqe_on) {
			/* the "busy-timer" will be restarted in case there
			 * are pending data requests
			 */
			mmc_cqe_clk_scaling_stop_busy(host, false, false);
		} else {
			mmc_clk_scaling_stop_busy(host, false);
			mmc_clk_scaling_start_busy(host, false);
		}
	}

	status->busy_time = clk_scaling->total_busy_time_us;
	status->total_time = ktime_to_us(ktime_sub(ktime_get(),
@@ -293,11 +383,20 @@ int mmc_clk_update_freq(struct mmc_host *host,
		if (err) {
			pr_err("%s: %s: CQE went in recovery path\n",
				mmc_hostname(host), __func__);
			goto out;
			goto error;
		}
		host->cqe_ops->cqe_off(host);
	}

	if (host->ops->notify_load) {
		err = host->ops->notify_load(host, state);
		if (err) {
			pr_err("%s: %s: fail on notify_load\n",
				mmc_hostname(host), __func__);
			goto error;
		}
	}

	if (!mmc_is_valid_state_for_clk_scaling(host)) {
		pr_debug("%s: invalid state for clock scaling - skipping\n",
			mmc_hostname(host));
@@ -315,6 +414,16 @@ int mmc_clk_update_freq(struct mmc_host *host,
	 * So no need to unhalt it explicitly
	 */

error:
	if (err) {
		/* restore previous state */
		if (host->ops->notify_load)
			if (host->ops->notify_load(host,
				host->clk_scaling.state))
				pr_err("%s: %s: fail on notify_load restore\n",
					mmc_hostname(host), __func__);
	}

out:
	return err;
}
@@ -428,8 +537,7 @@ void mmc_deferred_scaling(struct mmc_host *host)
				mmc_hostname(host),
				target_freq, current->comm);

	err = mmc_clk_update_freq(host, target_freq,
		clk_scaling.state);
	err = mmc_clk_update_freq(host, target_freq, clk_scaling.state);
	if (err && err != -EAGAIN)
		pr_err("%s: failed on deferred scale clocks (%d)\n",
			mmc_hostname(host), err);
+4 −0
Original line number Diff line number Diff line
@@ -106,6 +106,10 @@ extern int mmc_resume_clk_scaling(struct mmc_host *host);
extern int mmc_exit_clk_scaling(struct mmc_host *host);
extern void mmc_deferred_scaling(struct mmc_host *host);
extern unsigned long mmc_get_max_frequency(struct mmc_host *host);
extern void mmc_cqe_clk_scaling_start_busy(struct mmc_queue *mq,
	struct mmc_host *host, bool lock_needed);
extern void mmc_cqe_clk_scaling_stop_busy(struct mmc_host *host,
			bool lock_needed, bool is_cqe_dcmd);
#endif
int mmc_execute_tuning(struct mmc_card *card);
int mmc_hs200_to_hs400(struct mmc_card *card);
+86 −0
Original line number Diff line number Diff line
@@ -222,6 +222,83 @@ static int mmc_clock_opt_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(mmc_clock_fops, mmc_clock_opt_get, mmc_clock_opt_set,
	"%llu\n");

#if defined(CONFIG_SDC_QTI)
/* Debugfs read hook: report the current clock-scaling frequency. */
static int mmc_scale_get(void *data, u64 *val)
{
	struct mmc_host *mmc = data;

	*val = mmc->clk_scaling.curr_freq;
	return 0;
}

/* Debugfs write hook: force a clock-scaling frequency change by hand. */
static int mmc_scale_set(void *data, u64 val)
{
	struct mmc_host *host = data;
	int ret;

	mmc_claim_host(host);

	/* change frequency from sysfs manually */
	ret = mmc_clk_update_freq(host, val, host->clk_scaling.state);
	if (ret == -EAGAIN) {
		/* Deferred/busy: treat as success, same as the scaler does. */
		ret = 0;
	} else if (ret) {
		pr_err("%s: clock scale to %llu failed with error %d\n",
			mmc_hostname(host), val, ret);
	} else {
		pr_debug("%s: clock change to %llu finished successfully (%s)\n",
			mmc_hostname(host), val, current->comm);
	}

	mmc_release_host(host);

	return ret;
}

DEFINE_DEBUGFS_ATTRIBUTE(mmc_scale_fops, mmc_scale_get, mmc_scale_set,
	"%llu\n");

/* Debugfs read hook: report the host's maximum supported frequency. */
static int mmc_max_clock_get(void *data, u64 *val)
{
	struct mmc_host *host = data;

	if (!host)
		return -EINVAL;

	*val = host->f_max;
	return 0;
}

/*
 * Debugfs write hook: change the host's maximum bus frequency at runtime.
 * Rejects values below f_min; restores the previous f_max on failure.
 */
static int mmc_max_clock_set(void *data, u64 val)
{
	struct mmc_host *host = data;
	unsigned long freq = val;
	unsigned int saved_fmax;
	int rc = -EINVAL;

	if (!host || val < host->f_min)
		return rc;

	mmc_claim_host(host);
	if (host->bus_ops && host->bus_ops->change_bus_speed) {
		saved_fmax = host->f_max;
		host->f_max = freq;

		rc = host->bus_ops->change_bus_speed(host, &freq);
		if (rc)
			host->f_max = saved_fmax;
	}
	mmc_release_host(host);

	return rc;
}

DEFINE_DEBUGFS_ATTRIBUTE(mmc_max_clock_fops, mmc_max_clock_get,
		mmc_max_clock_set, "%llu\n");
#endif

void mmc_add_host_debugfs(struct mmc_host *host)
{
	struct dentry *root;
@@ -234,6 +311,15 @@ void mmc_add_host_debugfs(struct mmc_host *host)
	debugfs_create_x32("caps2", S_IRUSR, root, &host->caps2);
	debugfs_create_file("clock", S_IRUSR | S_IWUSR, root, host,
			    &mmc_clock_fops);
#if defined(CONFIG_SDC_QTI)
	debugfs_create_file("max_clock", 0600, root, host,
		&mmc_max_clock_fops);
	debugfs_create_file("scale", 0600, root, host,
		&mmc_scale_fops);
	debugfs_create_bool("skip_clk_scale_freq_update",
		0600, root,
		&host->clk_scaling.skip_clk_scale_freq_update);
#endif

#ifdef CONFIG_FAIL_MMC_REQUEST
	if (fail_request)
+301 −16
Original line number Diff line number Diff line
@@ -1539,6 +1539,235 @@ static int mmc_hs200_tuning(struct mmc_card *card)
	return mmc_execute_tuning(card);
}

#if defined(CONFIG_SDC_QTI)
/*
 * Switch the card to DDR52: HS timing first, then bus width, then DDR,
 * finishing at the 52 MHz max DTR.
 */
static int mmc_select_hs_ddr52(struct mmc_host *host)
{
	int err;

	/*
	 * Fix: the original discarded mmc_select_hs()'s return value and
	 * proceeded to bus-width/DDR selection even if the HS switch failed.
	 */
	err = mmc_select_hs(host->card);
	if (err) {
		pr_err("%s: %s: select_hs failed(%d)\n",
			mmc_hostname(host), __func__, err);
		return err;
	}

	err = mmc_select_bus_width(host->card);
	if (err < 0) {
		pr_err("%s: %s: select_bus_width failed(%d)\n",
			mmc_hostname(host), __func__, err);
		return err;
	}

	err = mmc_select_hs_ddr(host->card);
	mmc_set_clock(host, MMC_HIGH_52_MAX_DTR);

	return err;
}

/*
 * Scale down from HS400 to HS in order to allow frequency change.
 * This is needed for cards that don't support changing frequency in HS400
 */
static int mmc_scale_low(struct mmc_host *host, unsigned long freq)
{
	int err = 0;

	/* Drop to legacy timing/clock before renegotiating the bus speed. */
	mmc_set_timing(host, MMC_TIMING_LEGACY);
	mmc_set_clock(host, MMC_HIGH_26_MAX_DTR);

	/* Prefer DDR52 as the scaled-down mode when configured to do so. */
	if (host->clk_scaling.lower_bus_speed_mode &
	    MMC_SCALING_LOWER_DDR52_MODE) {
		err = mmc_select_hs_ddr52(host);
		if (err)
			pr_err("%s: %s: failed to switch to DDR52: err: %d\n",
			       mmc_hostname(host), __func__, err);
		else
			return err; /* DDR52 selected; err == 0 here */
		/* On DDR52 failure, fall through to plain HS below. */
	}

	err = mmc_select_hs(host->card);
	if (err) {
		pr_err("%s: %s: scaling low: failed (%d)\n",
		       mmc_hostname(host), __func__, err);
		return err;
	}

	err = mmc_select_bus_width(host->card);
	if (err < 0) {
		pr_err("%s: %s: select_bus_width failed(%d)\n",
			mmc_hostname(host), __func__, err);
		return err;
	}

	mmc_set_clock(host, freq);

	return 0;
}

/*
 * Scale UP from HS to HS200/HS400
 */
static int mmc_scale_high(struct mmc_host *host)
{
	int err = 0;

	/* From DDR52, drop to legacy timing/clock before switching up. */
	if (mmc_card_ddr52(host->card)) {
		mmc_set_timing(host, MMC_TIMING_LEGACY);
		mmc_set_clock(host, MMC_HIGH_26_MAX_DTR);
	}

	/*
	 * Without enhanced strobe support, HS400 must be reached via
	 * HS200 selection plus tuning first.
	 */
	if (!host->card->ext_csd.strobe_support) {
		if (!(host->card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)) {
			pr_err("%s: %s: card does not support HS200\n",
				mmc_hostname(host), __func__);
			WARN_ON(1);
			return -EPERM;
		}

		err = mmc_select_hs200(host->card);
		if (err) {
			pr_err("%s: %s: selecting HS200 failed (%d)\n",
				mmc_hostname(host), __func__, err);
			return err;
		}

		mmc_set_bus_speed(host->card);

		err = mmc_hs200_tuning(host->card);
		if (err) {
			pr_err("%s: %s: hs200 tuning failed (%d)\n",
				mmc_hostname(host), __func__, err);
			return err;
		}

		/* HS200-only card: done at HS200, nothing more to scale. */
		if (!(host->card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400)) {
			pr_debug("%s: card does not support HS400\n",
				mmc_hostname(host));
			return 0;
		}
	}

	/* Reset and re-run timing selection to reach HS400. */
	mmc_set_initial_state(host);
	err = mmc_select_timing(host->card);
	if (err) {
		pr_err("%s: %s: select hs400 failed (%d)\n",
			mmc_hostname(host), __func__, err);
		return err;
	}

	return 0;
}

/*
 * Dispatch a bus-speed change: scaling to the HS200 max DTR goes through
 * the scale-up path, any other target frequency through the scale-down path.
 */
static int mmc_set_clock_bus_speed(struct mmc_card *card, unsigned long freq)
{
	return (freq == MMC_HS200_MAX_DTR) ?
		mmc_scale_high(card->host) :
		mmc_scale_low(card->host, freq);
}

/*
 * For DDR modes the effective bus frequency is half the requested one,
 * except when the request is already the DDR max DTR.
 */
static inline unsigned long mmc_ddr_freq_accommodation(unsigned long freq)
{
	return (freq == MMC_HIGH_DDR_MAX_DTR) ? freq : freq / 2;
}

/**
 * mmc_change_bus_speed() - Change MMC card bus frequency at runtime
 * @host: pointer to mmc host structure
 * @freq: pointer to desired frequency to be set
 *
 * Change the MMC card bus frequency at runtime after the card is
 * initialized. Callers are expected to make sure of the card's
 * state (DATA/RCV/TRANSFER) before changing the frequency at runtime.
 *
 * If the frequency to change is greater than max. supported by card,
 * *freq is changed to max. supported by card. If it is less than min.
 * supported by host, *freq is changed to min. supported by host.
 * Host is assumed to be claimed while calling this function.
 */
static int mmc_change_bus_speed(struct mmc_host *host, unsigned long *freq)
{
	int err = 0;
	struct mmc_card *card;
	unsigned long actual_freq;

	card = host->card;

	if (!card || !freq) {
		err = -EINVAL;
		goto out;
	}
	actual_freq = *freq;

	WARN_ON(!host->claimed);

	/*
	 * For scaling up/down HS400 we'll need special handling,
	 * for other timings we can simply do clock frequency change
	 */
	if (mmc_card_hs400(card) ||
		(!mmc_card_hs200(host->card) && *freq == MMC_HS200_MAX_DTR)) {
		/* HS400 involved (current or target): full bus re-negotiation. */
		err = mmc_set_clock_bus_speed(card, *freq);
		if (err) {
			pr_err("%s: %s: failed (%d)to set bus and clock speed (freq=%lu)\n",
				mmc_hostname(host), __func__, err, *freq);
			goto out;
		}
	} else if (mmc_card_hs200(host->card)) {
		/* HS200 needs re-tuning after a clock change. */
		mmc_set_clock(host, *freq);
		err = mmc_hs200_tuning(host->card);
		if (err) {
			pr_warn("%s: %s: tuning execution failed %d\n",
				mmc_hostname(card->host),
				__func__, err);
			/* Tuning failed: fall back to the previous frequency. */
			mmc_set_clock(host, host->clk_scaling.curr_freq);
		}
	} else {
		/* Plain clock change; DDR52 halves the effective frequency. */
		if (mmc_card_ddr52(host->card))
			actual_freq = mmc_ddr_freq_accommodation(*freq);
		mmc_set_clock(host, actual_freq);
	}

out:
	return err;
}

/*
 * Record the clock-scaling frequency bounds for this card: the host's
 * minimum frequency as the floor, and the max DTR of the fastest timing
 * mode the card advertises as the ceiling.
 */
static void mmc_init_setup_scaling(struct mmc_card *card, struct mmc_host *host)
{
	unsigned int avail = card->mmc_avail_type;

	card->clk_scaling_lowest = host->f_min;

	if (avail & (EXT_CSD_CARD_TYPE_HS400 | EXT_CSD_CARD_TYPE_HS200))
		card->clk_scaling_highest = card->ext_csd.hs200_max_dtr;
	else if (avail & (EXT_CSD_CARD_TYPE_HS | EXT_CSD_CARD_TYPE_DDR_52))
		card->clk_scaling_highest = card->ext_csd.hs_max_dtr;
	else
		card->clk_scaling_highest = card->csd.max_dtr;
}
#endif

/*
 * Enable the Command Queue Engine on the host if one is present and not
 * yet enabled; when the card itself has not enabled CMDQ, fall back to
 * the host software queue instead.
 */
static void mmc_initialize_cqe(struct mmc_host *host, struct mmc_card *card)
{
	if (!host->cqe_ops || host->cqe_enabled)
		return;

	if (host->cqe_ops->cqe_enable(host, card))
		return;

	host->cqe_enabled = true;

	if (card->ext_csd.cmdq_en) {
		pr_info("%s: Command Queue Engine enabled\n",
			mmc_hostname(host));
	} else {
		host->hsq_enabled = true;
		pr_info("%s: Host Software Queue enabled\n",
			mmc_hostname(host));
	}
}

/*
 * Handle the detection and initialisation of a card.
 *
@@ -1775,6 +2004,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
		}
	}

#if defined(CONFIG_SDC_QTI)
	mmc_init_setup_scaling(card, host);
#endif
	/*
	 * Choose the power class with selected bus interface
	 */
@@ -1852,21 +2084,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
	 */
	card->reenable_cmdq = card->ext_csd.cmdq_en;

	if (host->cqe_ops && !host->cqe_enabled) {
		err = host->cqe_ops->cqe_enable(host, card);
		if (!err) {
			host->cqe_enabled = true;

			if (card->ext_csd.cmdq_en) {
				pr_info("%s: Command Queue Engine enabled\n",
					mmc_hostname(host));
			} else {
				host->hsq_enabled = true;
				pr_info("%s: Host Software Queue enabled\n",
					mmc_hostname(host));
			}
		}
	}
	mmc_initialize_cqe(host, card);

	if (host->caps2 & MMC_CAP2_AVOID_3_3V &&
	    host->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
@@ -1979,6 +2197,9 @@ static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
 */
static void mmc_remove(struct mmc_host *host)
{
#if defined(CONFIG_SDC_QTI)
	mmc_exit_clk_scaling(host);
#endif
	mmc_remove_card(host->card);
	host->card = NULL;
}
@@ -2022,7 +2243,14 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
	int err = 0;
	unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
					EXT_CSD_POWER_OFF_LONG;

#if defined(CONFIG_SDC_QTI)
	err = mmc_suspend_clk_scaling(host);
	if (err) {
		pr_err("%s: %s: fail to suspend clock scaling (%d)\n",
			mmc_hostname(host), __func__, err);
		return err;
	}
#endif
	mmc_claim_host(host);

	if (mmc_card_suspended(host->card))
@@ -2047,6 +2275,10 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
	}
out:
	mmc_release_host(host);
#if defined(CONFIG_SDC_QTI)
	if (err)
		mmc_resume_clk_scaling(host);
#endif
	return err;
}

@@ -2076,15 +2308,33 @@ static int _mmc_resume(struct mmc_host *host)

	mmc_claim_host(host);

#if defined(CONFIG_SDC_QTI)
	if (!mmc_card_suspended(host->card)) {
		mmc_release_host(host);
#else
	if (!mmc_card_suspended(host->card))
#endif
		goto out;
#if defined(CONFIG_SDC_QTI)
	}
#endif

	mmc_power_up(host, host->card->ocr);
	err = mmc_init_card(host, host->card->ocr, host->card);
	mmc_card_clr_suspended(host->card);

#if !defined(CONFIG_SDC_QTI)
out:
#endif
	mmc_release_host(host);

#if defined(CONFIG_SDC_QTI)
	err = mmc_resume_clk_scaling(host);
	if (err)
		pr_err("%s: %s: fail to resume clock scaling (%d)\n",
			mmc_hostname(host), __func__, err);
out:
#endif
	return err;
}

@@ -2103,6 +2353,15 @@ static int mmc_shutdown(struct mmc_host *host)
		!(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
		err = _mmc_resume(host);

#if defined(CONFIG_SDC_QTI)
	/*
	 * Exit clock scaling so that it doesn't kick in after
	 * power off notification is sent
	 */
	if (host->caps2 & MMC_CAP2_CLK_SCALE)
		mmc_exit_clk_scaling(host);
#endif

	if (!err)
		err = _mmc_suspend(host, false);

@@ -2164,6 +2423,9 @@ static int mmc_can_reset(struct mmc_card *card)
static int _mmc_hw_reset(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
#if defined(CONFIG_SDC_QTI)
	int ret;
#endif

	/*
	 * In the case of recovery, we can't expect flushing the cache to work
@@ -2183,7 +2445,19 @@ static int _mmc_hw_reset(struct mmc_host *host)
		mmc_power_cycle(host, card->ocr);
		mmc_pwrseq_reset(host);
	}

#if defined(CONFIG_SDC_QTI)
	ret = mmc_init_card(host, host->card->ocr, host->card);
	if (ret) {
		pr_err("%s: %s: mmc_init_card failed (%d)\n",
			mmc_hostname(host), __func__, ret);
		return ret;
	}

	return ret;
#else
	return mmc_init_card(host, card->ocr, card);
#endif
}

static const struct mmc_bus_ops mmc_ops = {
@@ -2196,6 +2470,10 @@ static const struct mmc_bus_ops mmc_ops = {
	.alive = mmc_alive,
	.shutdown = mmc_shutdown,
	.hw_reset = _mmc_hw_reset,
#if defined(CONFIG_SDC_QTI)
	.change_bus_speed = mmc_change_bus_speed,
#endif

};

/*
@@ -2252,6 +2530,13 @@ int mmc_attach_mmc(struct mmc_host *host)
		goto remove_card;

	mmc_claim_host(host);
#if defined(CONFIG_SDC_QTI)
	err = mmc_init_clk_scaling(host);
	if (err) {
		mmc_release_host(host);
		goto remove_card;
	}
#endif
	return 0;

remove_card:
Loading