Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b096d5ef authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "mmc: core: Use new flag for suspending clk scaling"

parents 0fb0c88d f4ac989c
Loading
Loading
Loading
Loading
+21 −24
Original line number Original line Diff line number Diff line
@@ -507,28 +507,24 @@ static int mmc_devfreq_set_target(struct device *dev,
	pr_debug("%s: target freq = %lu (%s)\n", mmc_hostname(host),
	pr_debug("%s: target freq = %lu (%s)\n", mmc_hostname(host),
		*freq, current->comm);
		*freq, current->comm);


	if ((clk_scaling->curr_freq == *freq) ||
		clk_scaling->skip_clk_scale_freq_update)
		goto out;

	/* No need to scale the clocks if they are gated */
	if (!host->ios.clock)
		goto out;

	spin_lock_bh(&clk_scaling->lock);
	spin_lock_bh(&clk_scaling->lock);
	if (clk_scaling->clk_scaling_in_progress) {
	if (clk_scaling->target_freq == *freq ||
		pr_debug("%s: clocks scaling is already in-progress by mmc thread\n",
		clk_scaling->skip_clk_scale_freq_update) {
			mmc_hostname(host));
		spin_unlock_bh(&clk_scaling->lock);
		spin_unlock_bh(&clk_scaling->lock);
		goto out;
		goto out;
	}
	}

	clk_scaling->need_freq_change = true;
	clk_scaling->need_freq_change = true;
	clk_scaling->target_freq = *freq;
	clk_scaling->target_freq = *freq;
	clk_scaling->state = *freq < clk_scaling->curr_freq ?
	clk_scaling->state = *freq < clk_scaling->curr_freq ?
		MMC_LOAD_LOW : MMC_LOAD_HIGH;
		MMC_LOAD_LOW : MMC_LOAD_HIGH;
	spin_unlock_bh(&clk_scaling->lock);
	spin_unlock_bh(&clk_scaling->lock);


	if (!clk_scaling->is_suspended && host->ios.clock)
		abort = __mmc_claim_host(host, &clk_scaling->devfreq_abort);
		abort = __mmc_claim_host(host, &clk_scaling->devfreq_abort);
	else
		goto out;

	if (abort)
	if (abort)
		goto out;
		goto out;


@@ -572,6 +568,7 @@ void mmc_deferred_scaling(struct mmc_host *host)
{
{
	unsigned long target_freq;
	unsigned long target_freq;
	int err;
	int err;
	struct mmc_devfeq_clk_scaling clk_scaling;


	if (!host->clk_scaling.enable)
	if (!host->clk_scaling.enable)
		return;
		return;
@@ -581,8 +578,7 @@ void mmc_deferred_scaling(struct mmc_host *host)


	spin_lock_bh(&host->clk_scaling.lock);
	spin_lock_bh(&host->clk_scaling.lock);


	if (host->clk_scaling.clk_scaling_in_progress ||
	if (!host->clk_scaling.need_freq_change) {
		!(host->clk_scaling.need_freq_change)) {
		spin_unlock_bh(&host->clk_scaling.lock);
		spin_unlock_bh(&host->clk_scaling.lock);
		return;
		return;
	}
	}
@@ -590,7 +586,12 @@ void mmc_deferred_scaling(struct mmc_host *host)


	atomic_inc(&host->clk_scaling.devfreq_abort);
	atomic_inc(&host->clk_scaling.devfreq_abort);
	target_freq = host->clk_scaling.target_freq;
	target_freq = host->clk_scaling.target_freq;
	host->clk_scaling.clk_scaling_in_progress = true;
	/*
	 * Store the clock scaling state while the lock is acquired so that
	 * if devfreq context modifies clk_scaling, it will get reflected only
	 * in the next deferred scaling check.
	 */
	clk_scaling = host->clk_scaling;
	host->clk_scaling.need_freq_change = false;
	host->clk_scaling.need_freq_change = false;
	spin_unlock_bh(&host->clk_scaling.lock);
	spin_unlock_bh(&host->clk_scaling.lock);
	pr_debug("%s: doing deferred frequency change (%lu) (%s)\n",
	pr_debug("%s: doing deferred frequency change (%lu) (%s)\n",
@@ -598,7 +599,7 @@ void mmc_deferred_scaling(struct mmc_host *host)
				target_freq, current->comm);
				target_freq, current->comm);


	err = mmc_clk_update_freq(host, target_freq,
	err = mmc_clk_update_freq(host, target_freq,
		host->clk_scaling.state);
		clk_scaling.state);
	if (err && err != -EAGAIN) {
	if (err && err != -EAGAIN) {
		pr_err("%s: failed on deferred scale clocks (%d)\n",
		pr_err("%s: failed on deferred scale clocks (%d)\n",
			mmc_hostname(host), err);
			mmc_hostname(host), err);
@@ -608,7 +609,6 @@ void mmc_deferred_scaling(struct mmc_host *host)
			mmc_hostname(host),
			mmc_hostname(host),
			target_freq, current->comm);
			target_freq, current->comm);
	}
	}
	host->clk_scaling.clk_scaling_in_progress = false;
	atomic_dec(&host->clk_scaling.devfreq_abort);
	atomic_dec(&host->clk_scaling.devfreq_abort);
}
}
EXPORT_SYMBOL(mmc_deferred_scaling);
EXPORT_SYMBOL(mmc_deferred_scaling);
@@ -738,7 +738,6 @@ int mmc_init_clk_scaling(struct mmc_host *host)
	spin_lock_init(&host->clk_scaling.lock);
	spin_lock_init(&host->clk_scaling.lock);
	atomic_set(&host->clk_scaling.devfreq_abort, 0);
	atomic_set(&host->clk_scaling.devfreq_abort, 0);
	host->clk_scaling.curr_freq = host->ios.clock;
	host->clk_scaling.curr_freq = host->ios.clock;
	host->clk_scaling.clk_scaling_in_progress = false;
	host->clk_scaling.need_freq_change = false;
	host->clk_scaling.need_freq_change = false;
	host->clk_scaling.is_busy_started = false;
	host->clk_scaling.is_busy_started = false;


@@ -809,7 +808,8 @@ int mmc_suspend_clk_scaling(struct mmc_host *host)
		return -EINVAL;
		return -EINVAL;
	}
	}


	if (!mmc_can_scale_clk(host) || !host->clk_scaling.enable)
	if (!mmc_can_scale_clk(host) || !host->clk_scaling.enable ||
			host->clk_scaling.is_suspended)
		return 0;
		return 0;


	if (!host->clk_scaling.devfreq) {
	if (!host->clk_scaling.devfreq) {
@@ -826,7 +826,7 @@ int mmc_suspend_clk_scaling(struct mmc_host *host)
			mmc_hostname(host), __func__);
			mmc_hostname(host), __func__);
		return err;
		return err;
	}
	}
	host->clk_scaling.enable = false;
	host->clk_scaling.is_suspended = true;


	host->clk_scaling.total_busy_time_us = 0;
	host->clk_scaling.total_busy_time_us = 0;


@@ -880,15 +880,12 @@ int mmc_resume_clk_scaling(struct mmc_host *host)
	if (host->ios.clock < host->clk_scaling.freq_table[max_clk_idx])
	if (host->ios.clock < host->clk_scaling.freq_table[max_clk_idx])
		host->clk_scaling.curr_freq = devfreq_min_clk;
		host->clk_scaling.curr_freq = devfreq_min_clk;


	host->clk_scaling.clk_scaling_in_progress = false;
	host->clk_scaling.need_freq_change = false;

	err = devfreq_resume_device(host->clk_scaling.devfreq);
	err = devfreq_resume_device(host->clk_scaling.devfreq);
	if (err) {
	if (err) {
		pr_err("%s: %s: failed to resume devfreq (%d)\n",
		pr_err("%s: %s: failed to resume devfreq (%d)\n",
			mmc_hostname(host), __func__, err);
			mmc_hostname(host), __func__, err);
	} else {
	} else {
		host->clk_scaling.enable = true;
		host->clk_scaling.is_suspended = false;
		pr_debug("%s: devfreq resumed\n", mmc_hostname(host));
		pr_debug("%s: devfreq resumed\n", mmc_hostname(host));
	}
	}


+4 −1
Original line number Original line Diff line number Diff line
@@ -761,6 +761,7 @@ static ssize_t store_enable(struct device *dev,
		/* Suspend the clock scaling and mask host capability */
		/* Suspend the clock scaling and mask host capability */
		if (host->clk_scaling.enable)
		if (host->clk_scaling.enable)
			mmc_suspend_clk_scaling(host);
			mmc_suspend_clk_scaling(host);
		host->clk_scaling.enable = false;
		host->caps2 &= ~MMC_CAP2_CLK_SCALE;
		host->caps2 &= ~MMC_CAP2_CLK_SCALE;
		host->clk_scaling.state = MMC_LOAD_HIGH;
		host->clk_scaling.state = MMC_LOAD_HIGH;
		/* Set to max. frequency when disabling */
		/* Set to max. frequency when disabling */
@@ -769,9 +770,11 @@ static ssize_t store_enable(struct device *dev,
	} else if (value) {
	} else if (value) {
		/* Unmask host capability and resume scaling */
		/* Unmask host capability and resume scaling */
		host->caps2 |= MMC_CAP2_CLK_SCALE;
		host->caps2 |= MMC_CAP2_CLK_SCALE;
		if (!host->clk_scaling.enable)
		if (!host->clk_scaling.enable) {
			host->clk_scaling.enable = true;
			mmc_resume_clk_scaling(host);
			mmc_resume_clk_scaling(host);
		}
		}
	}


	mmc_put_card(host->card);
	mmc_put_card(host->card);


+2 −2
Original line number Original line Diff line number Diff line
@@ -350,9 +350,9 @@ enum dev_state {
 * @upthreshold: up-threshold supplied to ondemand governor
 * @upthreshold: up-threshold supplied to ondemand governor
 * @downthreshold: down-threshold supplied to ondemand governor
 * @downthreshold: down-threshold supplied to ondemand governor
 * @need_freq_change: flag indicating if a frequency change is required
 * @need_freq_change: flag indicating if a frequency change is required
 * @clk_scaling_in_progress: flag indicating if there's ongoing frequency change
 * @is_busy_started: flag indicating if a request is handled by the HW
 * @is_busy_started: flag indicating if a request is handled by the HW
 * @enable: flag indicating if the clock scaling logic is enabled for this host
 * @enable: flag indicating if the clock scaling logic is enabled for this host
 * @is_suspended: to make devfreq request queued when mmc is suspended
 */
 */
struct mmc_devfeq_clk_scaling {
struct mmc_devfeq_clk_scaling {
	spinlock_t	lock;
	spinlock_t	lock;
@@ -377,9 +377,9 @@ struct mmc_devfeq_clk_scaling {
	unsigned int	lower_bus_speed_mode;
	unsigned int	lower_bus_speed_mode;
#define MMC_SCALING_LOWER_DDR52_MODE	1
#define MMC_SCALING_LOWER_DDR52_MODE	1
	bool		need_freq_change;
	bool		need_freq_change;
	bool		clk_scaling_in_progress;
	bool		is_busy_started;
	bool		is_busy_started;
	bool		enable;
	bool		enable;
	bool		is_suspended;
};
};


struct mmc_host {
struct mmc_host {