Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3e825f9b authored by Talel Shenhar
Browse files

mmc: clk-scaling: change locking from irq_save to bh



This commit changes the spinlock used by clock scaling
logic to use the bh flavor instead of irqsave.
This is done in order to avoid the unnecessary irq
disable while going into clock scaling critical sections.

Change-Id: Id660db89dc531336621f40908fd3a4ad4777a12d
Signed-off-by: Talel Shenhar <tatias@codeaurora.org>
parent 8808aef7
Loading
Loading
Loading
Loading
+14 −21
Original line number Diff line number Diff line
@@ -143,32 +143,30 @@ static bool mmc_is_data_request(struct mmc_request *mmc_request)

/*
 * mmc_clk_scaling_start_busy() - mark the start of a busy period for
 * the host's clock-scaling statistics.
 * @host:        MMC host whose clk_scaling state is updated
 * @lock_needed: true when the caller does not already hold
 *               clk_scaling->lock and it must be taken here
 *
 * Records the busy-period start timestamp and sets is_busy_started.
 * No-op when clock scaling is disabled for this host.
 *
 * Locking uses the _bh spinlock flavor: the lock is never taken from
 * hard-irq context, so disabling bottom halves is sufficient and the
 * irqsave variant's interrupt-off window is avoided (see commit
 * message). Note: the diff residue showed both irqsave and bh calls;
 * the resolved post-patch code keeps only the bh calls, and the
 * now-unused 'flags' local is removed.
 */
static void mmc_clk_scaling_start_busy(struct mmc_host *host, bool lock_needed)
{
	struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;

	if (!clk_scaling->enable)
		return;

	if (lock_needed)
		spin_lock_bh(&clk_scaling->lock);

	clk_scaling->start_busy = ktime_get();
	clk_scaling->is_busy_started = true;

	if (lock_needed)
		spin_unlock_bh(&clk_scaling->lock);
}

static void mmc_clk_scaling_stop_busy(struct mmc_host *host, bool lock_needed)
{
	unsigned long flags;
	struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;

	if (!clk_scaling->enable)
		return;

	if (lock_needed)
		spin_lock_irqsave(&clk_scaling->lock, flags);
		spin_lock_bh(&clk_scaling->lock);

	if (!clk_scaling->is_busy_started) {
		WARN_ON(1);
@@ -184,7 +182,7 @@ static void mmc_clk_scaling_stop_busy(struct mmc_host *host, bool lock_needed)

out:
	if (lock_needed)
		spin_unlock_irqrestore(&clk_scaling->lock, flags);
		spin_unlock_bh(&clk_scaling->lock);
}

/**
@@ -225,7 +223,6 @@ EXPORT_SYMBOL(mmc_can_scale_clk);
static int mmc_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *status)
{
	unsigned long flags;
	struct mmc_host *host = container_of(dev, struct mmc_host, class_dev);
	struct mmc_devfeq_clk_scaling *clk_scaling;

@@ -240,7 +237,7 @@ static int mmc_devfreq_get_dev_status(struct device *dev,
	if (!clk_scaling->enable)
		return 0;

	spin_lock_irqsave(&clk_scaling->lock, flags);
	spin_lock_bh(&clk_scaling->lock);

	/* accumulate the busy time of ongoing work */
	memset(status, 0, sizeof(*status));
@@ -262,7 +259,7 @@ static int mmc_devfreq_get_dev_status(struct device *dev,
		status->total_time, status->busy_time,
		status->current_frequency);

	spin_unlock_irqrestore(&clk_scaling->lock, flags);
	spin_unlock_bh(&clk_scaling->lock);

	return 0;
}
@@ -360,7 +357,6 @@ static int mmc_devfreq_set_target(struct device *dev,
	struct mmc_devfeq_clk_scaling *clk_scaling;
	int err = 0;
	int abort;
	unsigned long flags;

	if (!(host && freq)) {
		pr_err("%s: unexpected host/freq parameter\n", __func__);
@@ -383,18 +379,18 @@ static int mmc_devfreq_set_target(struct device *dev,
	if (!host->ios.clock)
		goto out;

	spin_lock_irqsave(&clk_scaling->lock, flags);
	spin_lock_bh(&clk_scaling->lock);
	if (clk_scaling->clk_scaling_in_progress) {
		pr_debug("%s: clocks scaling is already in-progress by mmc thread\n",
			mmc_hostname(host));
		spin_unlock_irqrestore(&clk_scaling->lock, flags);
		spin_unlock_bh(&clk_scaling->lock);
		goto out;
	}
	clk_scaling->need_freq_change = true;
	clk_scaling->target_freq = *freq;
	clk_scaling->state = *freq < clk_scaling->curr_freq ?
		MMC_LOAD_LOW : MMC_LOAD_HIGH;
	spin_unlock_irqrestore(&clk_scaling->lock, flags);
	spin_unlock_bh(&clk_scaling->lock);

	abort = __mmc_claim_host(host, &clk_scaling->devfreq_abort);
	if (abort)
@@ -424,18 +420,17 @@ out:

static void mmc_deferred_scaling(struct mmc_host *host)
{
	unsigned long flags;
	unsigned long target_freq;
	int err;

	if (!host->clk_scaling.enable)
		return;

	spin_lock_irqsave(&host->clk_scaling.lock, flags);
	spin_lock_bh(&host->clk_scaling.lock);

	if (host->clk_scaling.clk_scaling_in_progress ||
		!(host->clk_scaling.need_freq_change)) {
		spin_unlock_irqrestore(&host->clk_scaling.lock, flags);
		spin_unlock_bh(&host->clk_scaling.lock);
		return;
	}

@@ -444,7 +439,7 @@ static void mmc_deferred_scaling(struct mmc_host *host)
	target_freq = host->clk_scaling.target_freq;
	host->clk_scaling.clk_scaling_in_progress = true;
	host->clk_scaling.need_freq_change = false;
	spin_unlock_irqrestore(&host->clk_scaling.lock, flags);
	spin_unlock_bh(&host->clk_scaling.lock);
	pr_debug("%s: doing deferred frequency change (%lu) (%s)\n",
				mmc_hostname(host),
				target_freq, current->comm);
@@ -661,8 +656,6 @@ EXPORT_SYMBOL(mmc_exit_clk_scaling);
 */
/*
 * mmc_reset_clk_scale_stats() - reset the accumulated busy-time
 * statistic used by the clock-scaling (devfreq) logic.
 * @host: MMC host whose statistics are reset; warns and returns on NULL
 *
 * Clears total_busy_time_us under clk_scaling.lock. No-op when clock
 * scaling is disabled for this host.
 *
 * Uses the _bh spinlock flavor per this commit: the lock is not taken
 * from hard-irq context, so the irqsave variant is unnecessary.
 *
 * NOTE(review): the 'return;' after WARN_ON(1) falls between the two
 * diff hunks shown (lines 664-669 of the original file are elided);
 * it is reconstructed here — confirm against the full file.
 */
void mmc_reset_clk_scale_stats(struct mmc_host *host)
{
	if (!host) {
		pr_err("bad host parameter\n");
		WARN_ON(1);
		return;
	}
	if (!host->clk_scaling.enable)
		return;
	spin_lock_bh(&host->clk_scaling.lock);
	host->clk_scaling.total_busy_time_us = 0;
	spin_unlock_bh(&host->clk_scaling.lock);
}
EXPORT_SYMBOL(mmc_reset_clk_scale_stats);
EXPORT_SYMBOL(mmc_reset_clk_scale_stats);