Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7d6515f3 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "scsi: ufs: reduce auto hibern8 timeout to save power"

parents f1c69e2f dfd06cdf
Loading
Loading
Loading
Loading
+46 −25
Original line number Diff line number Diff line
@@ -1176,6 +1176,12 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
	return ret;
}

/*
 * ufshcd_cancel_gate_work - stop any pending or in-flight clock-gating work.
 * @hba: per-adapter instance
 *
 * Cancel the gating hrtimer first so it cannot re-queue gate_work, then
 * synchronously cancel gate_work itself, waiting for a running instance
 * to finish. The ordering matters: syncing the work before cancelling
 * the timer would leave a window where the timer fires and schedules
 * the work again.
 *
 * NOTE(review): cancel_work_sync() may sleep, so callers must be in
 * process context and must not hold the host lock — verify at call sites.
 */
static inline void ufshcd_cancel_gate_work(struct ufs_hba *hba)
{
	hrtimer_cancel(&hba->clk_gating.gate_hrtimer);
	cancel_work_sync(&hba->clk_gating.gate_work);
}

static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
@@ -1183,7 +1189,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
	ufshcd_cancel_gate_work(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
@@ -1254,14 +1260,18 @@ start:
		}
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
		/*
		 * If the timer was active but the callback was not running
		 * we have nothing to do, just change state and return.
		 */
		if (hrtimer_try_to_cancel(&hba->clk_gating.gate_hrtimer) == 1) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
				hba->clk_gating.state);
			break;
		}
		/*
		 * If we here, it means gating work is either done or
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
@@ -1301,7 +1311,7 @@ EXPORT_SYMBOL_GPL(ufshcd_hold);
static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
						clk_gating.gate_work);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
@@ -1394,8 +1404,9 @@ static void __ufshcd_release(struct ufs_hba *hba, bool no_sched)
	hba->clk_gating.state = REQ_CLKS_OFF;
	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);

	schedule_delayed_work(&hba->clk_gating.gate_work,
			      msecs_to_jiffies(hba->clk_gating.delay_ms));
	hrtimer_start(&hba->clk_gating.gate_hrtimer,
			ms_to_ktime(hba->clk_gating.delay_ms),
			HRTIMER_MODE_REL);
}

void ufshcd_release(struct ufs_hba *hba, bool no_sched)
@@ -1523,6 +1534,17 @@ out:
	return count;
}

/*
 * ufshcd_clkgate_hrtimer_handler - hrtimer callback that kicks off clock gating.
 * @timer: the expired clk_gating.gate_hrtimer embedded in struct ufs_hba
 *
 * Fires once the gating delay (clk_gating.delay_ms) has elapsed after
 * ufshcd_release(). The actual gating (which may sleep) cannot run in
 * timer context, so it is deferred to gate_work via schedule_work().
 *
 * Returns HRTIMER_NORESTART: the timer is one-shot; it is re-armed
 * explicitly by the release path when the clocks next become idle.
 */
static enum hrtimer_restart ufshcd_clkgate_hrtimer_handler(
					struct hrtimer *timer)
{
	struct ufs_hba *hba = container_of(timer, struct ufs_hba,
					   clk_gating.gate_hrtimer);

	schedule_work(&hba->clk_gating.gate_work);

	return HRTIMER_NORESTART;
}

static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	struct ufs_clk_gating *gating = &hba->clk_gating;
@@ -1539,27 +1561,25 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
	if (ufshcd_is_auto_hibern8_supported(hba))
		hba->caps &= ~UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;

	INIT_DELAYED_WORK(&gating->gate_work, ufshcd_gate_work);
	INIT_WORK(&gating->gate_work, ufshcd_gate_work);
	INIT_WORK(&gating->ungate_work, ufshcd_ungate_work);
	/*
	 * Clock gating work must be executed only after auto hibern8
	 * timeout has expired in the hardware or after aggressive
	 * hibern8 on idle software timeout. Using jiffy based low
	 * resolution delayed work is not reliable to guarantee this,
	 * hence use a high resolution timer to make sure we schedule
	 * the gate work precisely more than hibern8 timeout.
	 *
	 * Always make sure gating->delay_ms > hibern8_on_idle->delay_ms
	 */
	hrtimer_init(&gating->gate_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	gating->gate_hrtimer.function = ufshcd_clkgate_hrtimer_handler;

	gating->is_enabled = true;

	/*
	 * Scheduling the delayed work after 1 jiffies will make the work to
	 * get schedule any time from 0ms to 1000/HZ ms which is not desirable
	 * for hibern8 enter work as it may impact the performance if it gets
	 * scheduled almost immediately. Hence make sure that hibern8 enter
	 * work gets scheduled atleast after 2 jiffies (any time between
	 * 1000/HZ ms to 2000/HZ ms).
	 */
	gating->delay_ms_pwr_save = jiffies_to_msecs(
		max_t(unsigned long,
		      msecs_to_jiffies(UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE),
		      2));
	gating->delay_ms_perf = jiffies_to_msecs(
		max_t(unsigned long,
		      msecs_to_jiffies(UFSHCD_CLK_GATING_DELAY_MS_PERF),
		      2));
	gating->delay_ms_pwr_save = UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE;
	gating->delay_ms_perf = UFSHCD_CLK_GATING_DELAY_MS_PERF;

	/* start with performance mode */
	gating->delay_ms = gating->delay_ms_perf;
@@ -1616,8 +1636,8 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
		device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	}
	device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
	ufshcd_cancel_gate_work(hba);
	cancel_work_sync(&hba->clk_gating.ungate_work);
	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
}

static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
@@ -1928,6 +1948,7 @@ static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba)
		return;

	if (ufshcd_is_auto_hibern8_supported(hba)) {
		hba->hibern8_on_idle.delay_ms = 1;
		hba->hibern8_on_idle.state = AUTO_HIBERN8;
		/*
		 * Disable SW hibern8 enter on idle in case
@@ -1935,13 +1956,13 @@ static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba)
		 */
		hba->caps &= ~UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
	} else {
		hba->hibern8_on_idle.delay_ms = 10;
		INIT_DELAYED_WORK(&hba->hibern8_on_idle.enter_work,
				  ufshcd_hibern8_enter_work);
		INIT_WORK(&hba->hibern8_on_idle.exit_work,
			  ufshcd_hibern8_exit_work);
	}

	hba->hibern8_on_idle.delay_ms = 10;
	hba->hibern8_on_idle.is_enabled = true;

	hba->hibern8_on_idle.delay_attr.show =
+6 −3
Original line number Diff line number Diff line
@@ -39,6 +39,7 @@

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -396,8 +397,9 @@ enum clk_gating_state {

/**
 * struct ufs_clk_gating - UFS clock gating related info
 * @gate_work: worker to turn off clocks after some delay as specified in
 * delay_ms
 * @gate_hrtimer: hrtimer to invoke @gate_work after some delay as
 * specified in @delay_ms
 * @gate_work: worker to turn off clocks
 * @ungate_work: worker to turn on clocks that will be used in case of
 * interrupt context
 * @state: the current clocks state
@@ -415,7 +417,8 @@ enum clk_gating_state {
 * completion before gating clocks.
 */
struct ufs_clk_gating {
	struct delayed_work gate_work;
	struct hrtimer gate_hrtimer;
	struct work_struct gate_work;
	struct work_struct ungate_work;
	enum clk_gating_state state;
	unsigned long delay_ms;