Commit ba125366 authored by qctecmdr, committed by Gerrit - the friendly Code Review server

Merge "mmc: Define config flag to add qcom code to upstream module"

parents c5a30a55 d4b4a26a
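
Every QTI-specific addition in this change is wrapped in the same preprocessor guard, so kernels built without the new flag compile exactly the upstream code. A minimal sketch of the pattern, loosely based on the mmc_start_request() hunk further down (illustrative only, not a verbatim hunk from this commit):

#if defined(CONFIG_SDC_QTI)
	/* QTI clock-scaling hooks, compiled out when CONFIG_SDC_QTI is not set */
	if (mmc_is_data_request(mrq)) {
		mmc_deferred_scaling(host);
		mmc_clk_scaling_start_busy(host, true);
	}
#endif
	led_trigger_event(host->led, LED_FULL);	/* upstream path unchanged */
	__mmc_start_request(host, mrq);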
+1 −0
@@ -165,3 +165,4 @@ CONFIG_NEURON_APP_BLOCK_SERVER=y
CONFIG_ADSPRPC_QGKI=y
CONFIG_COMMON_CLK_QCOM_DEBUG=y
CONFIG_QMP_DEBUGFS_CLIENT=y
CONFIG_SDC_QTI=y
+7 −0
@@ -81,3 +81,10 @@ config MMC_TEST
	  This driver is only of interest to those developing or
	  testing a host driver. Most people should say N here.

config SDC_QTI
	tristate "Enable QTI specific code in MMC upstream driver"
	depends on QGKI && MMC_SDHCI_MSM
	default n
	help
	  This configuration flag allows adding QTI code in
	  MMC upstream driver.
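
Because the new option depends on QGKI && MMC_SDHCI_MSM, it is only selectable when both of those symbols are enabled; the defconfig hunk above adds only the new symbol, on the assumption that the other two are already set for that target. An illustrative fragment (not part of this change) of a configuration that builds the guarded code:

CONFIG_QGKI=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_SDC_QTI=y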
+9 −2
@@ -892,7 +892,9 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
		}

		card->ext_csd.part_config = part_config;

#if defined(CONFIG_SDC_QTI)
		card->part_curr = part_type;
#endif
		ret = mmc_blk_part_switch_post(card, main_md->part_curr);
	}

@@ -1949,13 +1951,18 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,

static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
{
#if defined(CONFIG_SDC_QTI)
	struct mmc_host *host = mq->card->host;
#endif
	unsigned long flags;
	bool put_card;

	spin_lock_irqsave(&mq->lock, flags);

	mq->in_flight[mmc_issue_type(mq, req)] -= 1;

#if defined(CONFIG_SDC_QTI)
	atomic_dec(&host->active_reqs);
#endif
	put_card = (mmc_tot_in_flight(mq) == 0);

	spin_unlock_irqrestore(&mq->lock, flags);
+693 −3
@@ -31,6 +31,9 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>
#if defined(CONFIG_SDC_QTI)
#include <linux/devfreq.h>
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/mmc.h>
@@ -41,7 +44,9 @@
#include "host.h"
#include "sdio_bus.h"
#include "pwrseq.h"

#if defined(CONFIG_SDC_QTI)
#include "queue.h"
#endif
#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"
@@ -109,6 +114,683 @@ static inline void mmc_should_fail_request(struct mmc_host *host,

#endif /* CONFIG_FAIL_MMC_REQUEST */

#if defined(CONFIG_SDC_QTI)
static bool mmc_is_data_request(struct mmc_request *mmc_request)
{
	switch (mmc_request->cmd->opcode) {
	case MMC_READ_SINGLE_BLOCK:
	case MMC_READ_MULTIPLE_BLOCK:
	case MMC_WRITE_BLOCK:
	case MMC_WRITE_MULTIPLE_BLOCK:
		return true;
	default:
		return false;
	}
}

static void mmc_clk_scaling_start_busy(struct mmc_host *host, bool lock_needed)
{
	struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;

	if (!clk_scaling->enable)
		return;

	if (lock_needed)
		spin_lock_bh(&clk_scaling->lock);

	clk_scaling->start_busy = ktime_get();
	clk_scaling->is_busy_started = true;

	if (lock_needed)
		spin_unlock_bh(&clk_scaling->lock);
}

static void mmc_clk_scaling_stop_busy(struct mmc_host *host, bool lock_needed)
{
	struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;

	if (!clk_scaling->enable)
		return;

	if (lock_needed)
		spin_lock_bh(&clk_scaling->lock);

	if (!clk_scaling->is_busy_started) {
		WARN_ON(1);
		goto out;
	}

	clk_scaling->total_busy_time_us +=
		ktime_to_us(ktime_sub(ktime_get(),
			clk_scaling->start_busy));
	pr_debug("%s: accumulated busy time is %lu usec\n",
		mmc_hostname(host), clk_scaling->total_busy_time_us);
	clk_scaling->is_busy_started = false;

out:
	if (lock_needed)
		spin_unlock_bh(&clk_scaling->lock);
}

/**
 * mmc_can_scale_clk() - Check clock scaling capability
 * @host: pointer to mmc host structure
 */
bool mmc_can_scale_clk(struct mmc_host *host)
{
	if (!host) {
		pr_err("bad host parameter\n");
		WARN_ON(1);
		return false;
	}

	return host->caps2 & MMC_CAP2_CLK_SCALE;
}
EXPORT_SYMBOL(mmc_can_scale_clk);

static int mmc_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *status)
{
	struct mmc_host *host = container_of(dev, struct mmc_host, class_dev);
	struct mmc_devfeq_clk_scaling *clk_scaling;
	unsigned long flags;

	if (!host) {
		pr_err("bad host parameter\n");
		WARN_ON(1);
		return -EINVAL;
	}

	clk_scaling = &host->clk_scaling;

	if (!clk_scaling->enable)
		return 0;

	spin_lock_irqsave(&host->clk_scaling.lock, flags);

	/* accumulate the busy time of ongoing work */
	memset(status, 0, sizeof(*status));
	if (clk_scaling->is_busy_started) {
		mmc_clk_scaling_stop_busy(host, false);
		mmc_clk_scaling_start_busy(host, false);
	}

	status->busy_time = clk_scaling->total_busy_time_us;
	status->total_time = ktime_to_us(ktime_sub(ktime_get(),
		clk_scaling->measure_interval_start));
	clk_scaling->total_busy_time_us = 0;
	status->current_frequency = clk_scaling->curr_freq;
	clk_scaling->measure_interval_start = ktime_get();

	pr_debug("%s: status: load = %lu%% - total_time=%lu busy_time = %lu, clk=%lu\n",
		mmc_hostname(host),
		(status->busy_time*100)/status->total_time,
		status->total_time, status->busy_time,
		status->current_frequency);

	spin_unlock_irqrestore(&host->clk_scaling.lock, flags);

	return 0;
}

static bool mmc_is_valid_state_for_clk_scaling(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	u32 status;

	/*
	 * If the current partition type is RPMB, clock switching may not
	 * work properly as sending tuning command (CMD21) is illegal in
	 * this mode.
	 */
	if (!card || (mmc_card_mmc(card) &&
			(card->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)))
		return false;

	if (mmc_send_status(card, &status)) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		return false;
	}

	return R1_CURRENT_STATE(status) == R1_STATE_TRAN;
}

int mmc_clk_update_freq(struct mmc_host *host,
		unsigned long freq, enum mmc_load state)
{
	int err = 0;

	if (!host) {
		pr_err("bad host parameter\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* make sure the card supports the frequency we want */
	if (unlikely(freq > host->card->clk_scaling_highest)) {
		freq = host->card->clk_scaling_highest;
		pr_warn("%s: %s: High freq was overridden to %lu\n",
				mmc_hostname(host), __func__,
				host->card->clk_scaling_highest);
	}

	if (unlikely(freq < host->card->clk_scaling_lowest)) {
		freq = host->card->clk_scaling_lowest;
		pr_warn("%s: %s: Low freq was overridden to %lu\n",
			mmc_hostname(host), __func__,
			host->card->clk_scaling_lowest);
	}

	if (freq == host->clk_scaling.curr_freq)
		goto out;

	if (host->cqe_on) {
		err = host->cqe_ops->cqe_wait_for_idle(host);
		if (err) {
			pr_err("%s: %s: CQE went in recovery path\n",
				mmc_hostname(host), __func__);
			goto out;
		}
		host->cqe_ops->cqe_off(host);
	}

	if (!mmc_is_valid_state_for_clk_scaling(host)) {
		pr_debug("%s: invalid state for clock scaling - skipping\n",
			mmc_hostname(host));
		goto out;
	}

	err = host->bus_ops->change_bus_speed(host, &freq);
	if (!err)
		host->clk_scaling.curr_freq = freq;
	else
		pr_err("%s: %s: failed (%d) at freq=%lu\n",
			mmc_hostname(host), __func__, err, freq);
	/*
	 * CQE would be enabled as part of the CQE issuing path,
	 * so there is no need to unhalt it explicitly.
	 */

out:
	return err;
}
EXPORT_SYMBOL(mmc_clk_update_freq);

static int mmc_devfreq_set_target(struct device *dev,
				unsigned long *freq, u32 devfreq_flags)
{
	struct mmc_host *host = container_of(dev, struct mmc_host, class_dev);
	struct mmc_devfeq_clk_scaling *clk_scaling;
	int err = 0;
	int abort;
	unsigned long pflags = current->flags;
	unsigned long flags;

	/* Ensure scaling would happen even in memory pressure conditions */
	current->flags |= PF_MEMALLOC;

	if (!(host && freq)) {
		pr_err("%s: unexpected host/freq parameter\n", __func__);
		err = -EINVAL;
		goto out;
	}

	clk_scaling = &host->clk_scaling;

	if (!clk_scaling->enable)
		goto out;

	pr_debug("%s: target freq = %lu (%s)\n", mmc_hostname(host),
		*freq, current->comm);

	spin_lock_irqsave(&clk_scaling->lock, flags);
	if (clk_scaling->target_freq == *freq ||
		clk_scaling->skip_clk_scale_freq_update) {
		spin_unlock_irqrestore(&clk_scaling->lock, flags);
		goto out;
	}

	clk_scaling->need_freq_change = true;
	clk_scaling->target_freq = *freq;
	clk_scaling->state = *freq < clk_scaling->curr_freq ?
		MMC_LOAD_LOW : MMC_LOAD_HIGH;
	spin_unlock_irqrestore(&clk_scaling->lock, flags);

	if (!clk_scaling->is_suspended && host->ios.clock)
		abort = __mmc_claim_host(host, NULL,
				&clk_scaling->devfreq_abort);
	else
		goto out;

	if (abort)
		goto out;

	/*
	 * In case we were able to claim host there is no need to
	 * defer the frequency change. It will be done now
	 */
	clk_scaling->need_freq_change = false;

	err = mmc_clk_update_freq(host, *freq, clk_scaling->state);
	if (err && err != -EAGAIN)
		pr_err("%s: clock scale to %lu failed with error %d\n",
			mmc_hostname(host), *freq, err);
	else
		pr_debug("%s: clock change to %lu finished successfully (%s)\n",
			mmc_hostname(host), *freq, current->comm);

	mmc_release_host(host);
out:
	current_restore_flags(pflags, PF_MEMALLOC);
	return err;
}

/**
 * mmc_deferred_scaling() - scale clocks from data path (mmc thread context)
 * @host: pointer to mmc host structure
 *
 * This function does clock scaling in case "need_freq_change" flag was set
 * by the clock scaling logic.
 */
void mmc_deferred_scaling(struct mmc_host *host)
{
	unsigned long target_freq;
	int err;
	struct mmc_devfeq_clk_scaling clk_scaling;
	unsigned long flags;

	if (!host->clk_scaling.enable)
		return;

	spin_lock_irqsave(&host->clk_scaling.lock, flags);

	if (!host->clk_scaling.need_freq_change) {
		spin_unlock_irqrestore(&host->clk_scaling.lock, flags);
		return;
	}

	atomic_inc(&host->clk_scaling.devfreq_abort);
	target_freq = host->clk_scaling.target_freq;
	/*
	 * Store the clock scaling state while the lock is acquired so that
	 * if devfreq context modifies clk_scaling, it will get reflected only
	 * in the next deferred scaling check.
	 */
	clk_scaling = host->clk_scaling;
	host->clk_scaling.need_freq_change = false;
	spin_unlock_irqrestore(&host->clk_scaling.lock, flags);

	pr_debug("%s: doing deferred frequency change (%lu) (%s)\n",
				mmc_hostname(host),
				target_freq, current->comm);

	err = mmc_clk_update_freq(host, target_freq,
		clk_scaling.state);
	if (err && err != -EAGAIN)
		pr_err("%s: failed on deferred scale clocks (%d)\n",
			mmc_hostname(host), err);
	else
		pr_debug("%s: clocks were successfully scaled to %lu (%s)\n",
			mmc_hostname(host),
			target_freq, current->comm);
	atomic_dec(&host->clk_scaling.devfreq_abort);
}
EXPORT_SYMBOL(mmc_deferred_scaling);

static int mmc_devfreq_create_freq_table(struct mmc_host *host)
{
	int i;
	struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;

	pr_debug("%s: supported: lowest=%lu, highest=%lu\n",
		mmc_hostname(host),
		host->card->clk_scaling_lowest,
		host->card->clk_scaling_highest);

	/*
	 * Create the frequency table and initialize it with default values.
	 * Initialize it with platform specific frequencies if the frequency
	 * table supplied by platform driver is present, otherwise initialize
	 * it with min and max frequencies supported by the card.
	 */
	if (!clk_scaling->freq_table) {
		if (clk_scaling->pltfm_freq_table_sz)
			clk_scaling->freq_table_sz =
				clk_scaling->pltfm_freq_table_sz;
		else
			clk_scaling->freq_table_sz = 2;

		clk_scaling->freq_table = kcalloc(
			clk_scaling->freq_table_sz,
			sizeof(*(clk_scaling->freq_table)), GFP_KERNEL);
		if (!clk_scaling->freq_table)
			return -ENOMEM;

		if (clk_scaling->pltfm_freq_table) {
			memcpy(clk_scaling->freq_table,
				clk_scaling->pltfm_freq_table,
				(clk_scaling->pltfm_freq_table_sz *
				sizeof(*(clk_scaling->pltfm_freq_table))));
		} else {
			pr_debug("%s: no frequency table defined -  setting default\n",
				mmc_hostname(host));
			clk_scaling->freq_table[0] =
				host->card->clk_scaling_lowest;
			clk_scaling->freq_table[1] =
				host->card->clk_scaling_highest;
			goto out;
		}
	}

	if (host->card->clk_scaling_lowest >
		clk_scaling->freq_table[0])
		pr_debug("%s: frequency table undershot possible freq\n",
			mmc_hostname(host));

	for (i = 0; i < clk_scaling->freq_table_sz; i++) {
		if (clk_scaling->freq_table[i] <=
			host->card->clk_scaling_highest)
			continue;
		clk_scaling->freq_table[i] =
			host->card->clk_scaling_highest;
		clk_scaling->freq_table_sz = i + 1;
		pr_debug("%s: frequency table overshot possible freq (%d)\n",
				mmc_hostname(host), clk_scaling->freq_table[i]);
		break;
	}

out:
	/*
	 * devfreq requires an unsigned long freq_table while the
	 * freq_table in clk_scaling is u32. Allocate a separate array
	 * for it here and release it when clock scaling exits.
	 */
	clk_scaling->devfreq_profile.freq_table =  kcalloc(
			clk_scaling->freq_table_sz,
			sizeof(*(clk_scaling->devfreq_profile.freq_table)),
			GFP_KERNEL);
	if (!clk_scaling->devfreq_profile.freq_table) {
		kfree(clk_scaling->freq_table);
		return -ENOMEM;
	}
	clk_scaling->devfreq_profile.max_state = clk_scaling->freq_table_sz;

	for (i = 0; i < clk_scaling->freq_table_sz; i++) {
		clk_scaling->devfreq_profile.freq_table[i] =
			clk_scaling->freq_table[i];
		pr_debug("%s: freq[%d] = %u\n",
			mmc_hostname(host), i, clk_scaling->freq_table[i]);
	}

	return 0;
}

/**
 * mmc_init_clk_scaling() - Initialize clock scaling
 * @host: pointer to mmc host structure
 *
 * Initialize clock scaling for supported hosts. It is assumed that the caller
 * ensures the clock is running at the maximum possible frequency before
 * calling this function. struct devfreq_simple_ondemand_data is used to
 * configure the governor.
 */
int mmc_init_clk_scaling(struct mmc_host *host)
{
	int err;
	struct devfreq *devfreq;

	if (!host || !host->card) {
		pr_err("%s: unexpected host/card parameters\n",
			__func__);
		return -EINVAL;
	}

	if (!mmc_can_scale_clk(host) ||
		!host->bus_ops->change_bus_speed) {
		pr_debug("%s: clock scaling is not supported\n",
			mmc_hostname(host));
		return 0;
	}

	pr_debug("registering %s dev (%pK) to devfreq\n",
		mmc_hostname(host),
		mmc_classdev(host));

	if (host->clk_scaling.devfreq) {
		pr_err("%s: dev is already registered for dev %pK\n",
			mmc_hostname(host),
			mmc_dev(host));
		return -EPERM;
	}
	spin_lock_init(&host->clk_scaling.lock);
	atomic_set(&host->clk_scaling.devfreq_abort, 0);
	host->clk_scaling.curr_freq = host->ios.clock;
	host->clk_scaling.need_freq_change = false;
	host->clk_scaling.is_busy_started = false;

	host->clk_scaling.devfreq_profile.polling_ms =
		host->clk_scaling.polling_delay_ms;
	host->clk_scaling.devfreq_profile.get_dev_status =
		mmc_devfreq_get_dev_status;
	host->clk_scaling.devfreq_profile.target = mmc_devfreq_set_target;
	host->clk_scaling.devfreq_profile.initial_freq = host->ios.clock;

	host->clk_scaling.ondemand_gov_data.upthreshold =
		host->clk_scaling.upthreshold;
	host->clk_scaling.ondemand_gov_data.downdifferential =
		host->clk_scaling.upthreshold - host->clk_scaling.downthreshold;

	err = mmc_devfreq_create_freq_table(host);
	if (err) {
		pr_err("%s: fail to create devfreq frequency table\n",
			mmc_hostname(host));
		return err;
	}

	dev_pm_opp_add(mmc_classdev(host),
		host->clk_scaling.devfreq_profile.freq_table[0], 0);
	dev_pm_opp_add(mmc_classdev(host),
		host->clk_scaling.devfreq_profile.freq_table[1], 0);

	pr_debug("%s: adding devfreq with: upthreshold=%u downthreshold=%u polling=%u\n",
		mmc_hostname(host),
		host->clk_scaling.ondemand_gov_data.upthreshold,
		host->clk_scaling.ondemand_gov_data.downdifferential,
		host->clk_scaling.devfreq_profile.polling_ms);

	devfreq = devfreq_add_device(
		mmc_classdev(host),
		&host->clk_scaling.devfreq_profile,
		"simple_ondemand",
		&host->clk_scaling.ondemand_gov_data);

	if (IS_ERR(devfreq)) {
		pr_err("%s: unable to register with devfreq\n",
			mmc_hostname(host));
		dev_pm_opp_remove(mmc_classdev(host),
			host->clk_scaling.devfreq_profile.freq_table[0]);
		dev_pm_opp_remove(mmc_classdev(host),
			host->clk_scaling.devfreq_profile.freq_table[1]);
		return PTR_ERR(devfreq);
	}

	host->clk_scaling.devfreq = devfreq;
	pr_debug("%s: clk scaling is enabled for device %s (%pK) with devfreq %pK (clock = %uHz)\n",
		mmc_hostname(host),
		dev_name(mmc_classdev(host)),
		mmc_classdev(host),
		host->clk_scaling.devfreq,
		host->ios.clock);

	host->clk_scaling.enable = true;

	return err;
}
EXPORT_SYMBOL(mmc_init_clk_scaling);

/**
 * mmc_suspend_clk_scaling() - suspend clock scaling
 * @host: pointer to mmc host structure
 *
 * This API will suspend devfreq feature for the specific host.
 * The statistics collected by mmc will be cleared.
 * This function is intended to be called by the pm callbacks
 * (e.g. runtime_suspend, suspend) of the mmc device
 */
int mmc_suspend_clk_scaling(struct mmc_host *host)
{
	int err;

	if (!host) {
		WARN(1, "bad host parameter\n");
		return -EINVAL;
	}

	if (!mmc_can_scale_clk(host) || !host->clk_scaling.enable ||
			host->clk_scaling.is_suspended)
		return 0;

	if (!host->clk_scaling.devfreq) {
		pr_err("%s: %s: no devfreq is assosiated with this device\n",
			mmc_hostname(host), __func__);
		return -EPERM;
	}

	atomic_inc(&host->clk_scaling.devfreq_abort);
	wake_up(&host->wq);
	err = devfreq_suspend_device(host->clk_scaling.devfreq);
	if (err) {
		pr_err("%s: %s: failed to suspend devfreq\n",
			mmc_hostname(host), __func__);
		return err;
	}
	host->clk_scaling.is_suspended = true;

	host->clk_scaling.total_busy_time_us = 0;

	pr_debug("%s: devfreq was removed\n", mmc_hostname(host));

	return 0;
}
EXPORT_SYMBOL(mmc_suspend_clk_scaling);

/**
 * mmc_resume_clk_scaling() - resume clock scaling
 * @host: pointer to mmc host structure
 *
 * This API will resume devfreq feature for the specific host.
 * This API is intended to be called by the pm callbacks
 * (e.g. runtime_resume, resume) of the mmc device
 */
int mmc_resume_clk_scaling(struct mmc_host *host)
{
	int err = 0;
	u32 max_clk_idx = 0;
	u32 devfreq_max_clk = 0;
	u32 devfreq_min_clk = 0;

	if (!host) {
		WARN(1, "bad host parameter\n");
		return -EINVAL;
	}

	if (!mmc_can_scale_clk(host))
		return 0;

	/*
	 * If clock scaling is already exited when resume is called, like
	 * during mmc shutdown, it is not an error and should not fail the
	 * API calling this.
	 */
	if (!host->clk_scaling.devfreq) {
		pr_warn("%s: %s: no devfreq is assosiated with this device\n",
			mmc_hostname(host), __func__);
		return 0;
	}

	atomic_set(&host->clk_scaling.devfreq_abort, 0);

	max_clk_idx = host->clk_scaling.freq_table_sz - 1;
	devfreq_max_clk = host->clk_scaling.freq_table[max_clk_idx];
	devfreq_min_clk = host->clk_scaling.freq_table[0];

	host->clk_scaling.curr_freq = devfreq_max_clk;
	if (host->ios.clock < host->clk_scaling.freq_table[max_clk_idx])
		host->clk_scaling.curr_freq = devfreq_min_clk;
	host->clk_scaling.target_freq = host->clk_scaling.curr_freq;

	err = devfreq_resume_device(host->clk_scaling.devfreq);
	if (err) {
		pr_err("%s: %s: failed to resume devfreq (%d)\n",
			mmc_hostname(host), __func__, err);
	} else {
		host->clk_scaling.is_suspended = false;
		pr_debug("%s: devfreq resumed\n", mmc_hostname(host));
	}

	return err;
}
EXPORT_SYMBOL(mmc_resume_clk_scaling);

/**
 * mmc_exit_clk_scaling() - Disable clock scaling
 * @host: pointer to mmc host structure
 *
 * Disable clock scaling permanently.
 */
int mmc_exit_clk_scaling(struct mmc_host *host)
{
	int err;

	if (!host) {
		pr_err("%s: bad host parameter\n", __func__);
		WARN_ON(1);
		return -EINVAL;
	}

	if (!mmc_can_scale_clk(host))
		return 0;

	if (!host->clk_scaling.devfreq) {
		pr_err("%s: %s: no devfreq is assosiated with this device\n",
			mmc_hostname(host), __func__);
		return -EPERM;
	}

	err = mmc_suspend_clk_scaling(host);
	if (err) {
		pr_err("%s: %s: fail to suspend clock scaling (%d)\n",
			mmc_hostname(host), __func__,  err);
		return err;
	}

	err = devfreq_remove_device(host->clk_scaling.devfreq);
	if (err) {
		pr_err("%s: remove devfreq failed (%d)\n",
			mmc_hostname(host), err);
		return err;
	}

	dev_pm_opp_remove(mmc_classdev(host),
		host->clk_scaling.devfreq_profile.freq_table[0]);
	dev_pm_opp_remove(mmc_classdev(host),
		host->clk_scaling.devfreq_profile.freq_table[1]);

	kfree(host->clk_scaling.devfreq_profile.freq_table);

	host->clk_scaling.devfreq = NULL;
	atomic_set(&host->clk_scaling.devfreq_abort, 1);

	kfree(host->clk_scaling.freq_table);
	host->clk_scaling.freq_table = NULL;

	pr_debug("%s: devfreq was removed\n", mmc_hostname(host));

	return 0;
}
EXPORT_SYMBOL(mmc_exit_clk_scaling);
#endif

static inline void mmc_complete_cmd(struct mmc_request *mrq)
{
	if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
@@ -139,7 +821,10 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

#if defined(CONFIG_SDC_QTI)
	if (host->clk_scaling.is_busy_started)
		mmc_clk_scaling_stop_busy(host, true);
#endif
	/* Flag re-tuning needed on CRC errors */
	if (cmd->opcode != MMC_SEND_TUNING_BLOCK &&
	    cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200 &&
@@ -350,7 +1035,12 @@ int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
	err = mmc_mrq_prep(host, mrq);
	if (err)
		return err;

#if defined(CONFIG_SDC_QTI)
	if (mmc_is_data_request(mrq)) {
		mmc_deferred_scaling(host);
		mmc_clk_scaling_start_busy(host, true);
	}
#endif
	led_trigger_event(host->led, LED_FULL);
	__mmc_start_request(host, mrq);

+19 −1
@@ -14,6 +14,9 @@
struct mmc_host;
struct mmc_card;
struct mmc_request;
#if defined(CONFIG_SDC_QTI)
struct mmc_queue;
#endif

#define MMC_CMD_RETRIES        3

@@ -29,6 +32,9 @@ struct mmc_bus_ops {
	int (*shutdown)(struct mmc_host *);
	int (*hw_reset)(struct mmc_host *);
	int (*sw_reset)(struct mmc_host *);
#if defined(CONFIG_SDC_QTI)
	int (*change_bus_speed)(struct mmc_host *host, unsigned long *freq);
#endif
};

void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
@@ -57,7 +63,10 @@ void mmc_power_off(struct mmc_host *host);
void mmc_power_cycle(struct mmc_host *host, u32 ocr);
void mmc_set_initial_state(struct mmc_host *host);
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max);

#if defined(CONFIG_SDC_QTI)
int mmc_clk_update_freq(struct mmc_host *host,
		unsigned long freq, enum mmc_load state);
#endif
static inline void mmc_delay(unsigned int ms)
{
	if (ms <= 20)
@@ -89,6 +98,15 @@ void mmc_remove_host_debugfs(struct mmc_host *host);
void mmc_add_card_debugfs(struct mmc_card *card);
void mmc_remove_card_debugfs(struct mmc_card *card);

#if defined(CONFIG_SDC_QTI)
extern bool mmc_can_scale_clk(struct mmc_host *host);
extern int mmc_init_clk_scaling(struct mmc_host *host);
extern int mmc_suspend_clk_scaling(struct mmc_host *host);
extern int mmc_resume_clk_scaling(struct mmc_host *host);
extern int mmc_exit_clk_scaling(struct mmc_host *host);
extern void mmc_deferred_scaling(struct mmc_host *host);
extern unsigned long mmc_get_max_frequency(struct mmc_host *host);
#endif
int mmc_execute_tuning(struct mmc_card *card);
int mmc_hs200_to_hs400(struct mmc_card *card);
int mmc_hs400_to_hs200(struct mmc_card *card);
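
The suspend/resume helpers declared above are, per their kernel-doc, meant to be driven from the bus drivers' pm callbacks. A hypothetical sketch of such a caller (example_bus_suspend and its wiring are illustrative, not part of this commit):

#if defined(CONFIG_SDC_QTI)
static int example_bus_suspend(struct mmc_host *host)
{
	int err;

	/* Stop devfreq polling before the card is powered down. */
	err = mmc_suspend_clk_scaling(host);
	if (err) {
		pr_err("%s: failed to suspend clk scaling (%d)\n",
		       mmc_hostname(host), err);
		return err;
	}

	/* ...the existing suspend sequence would continue here... */
	return 0;
}
#endif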