Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5b4972fa authored by Stephen Boyd's avatar Stephen Boyd Committed by Mahesh Sivasubramanian
Browse files

drivers: cpuidle: lpm-levels: Support s2idle



If we support suspend-to-idle we can avoid the overhead of
hotplugging out CPUs and tearing things down during suspend. Most
things are already in place to support this so we can pretty much
re-use the platform suspend ops to enter the freeze state for
suspending with cpuidle. We need to move the clk printing call
into cluster_configure() so that we only call this when we're
going into suspend on the last CPU in the case of freeze. Putting
it here shares code between both versions of suspend.

Change-Id: Ib062d93a0c122a644af4a729dbb202b88c9a6c56
Signed-off-by: default avatarStephen Boyd <sboyd@codeaurora.org>
parent ce9027a6
Loading
Loading
Loading
Loading
+42 −8
Original line number Diff line number Diff line
@@ -1038,8 +1038,18 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
	}

	if (level->notify_rpm) {
		/*
		 * Print the clocks which are enabled during system suspend
		 * This debug information is useful to know which are the
		 * clocks that are enabled and preventing the system level
		 * LPMs(XO and Vmin).
		 */
		if (!from_idle)
			clock_debug_print_enabled(true);

		cpu = get_next_online_cpu(from_idle);
		cpumask_copy(&cpumask, cpumask_of(cpu));

		clear_predict_history();
		clear_cl_predict_history();
		if (sys_pm_ops && sys_pm_ops->enter)
@@ -1395,6 +1405,30 @@ static int lpm_cpuidle_enter(struct cpuidle_device *dev,
	return idx;
}

/*
 * Suspend-to-idle (s2idle/"freeze") entry point for this CPU's cpuidle
 * state. Picks the deepest low power mode still permitted for suspend
 * and enters it via PSCI, preparing/unpreparing the CPU and its parent
 * cluster around the sleep.
 */
static void lpm_cpuidle_freeze(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int idx)
{
	struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
	const struct cpumask *cpumask = get_cpu_mask(dev->cpu);

	/*
	 * Walk down from the requested level to the deepest one that is
	 * allowed for this CPU (third arg false — presumably "not from
	 * idle", i.e. the suspend path; confirm against
	 * lpm_cpu_mode_allow()'s definition).
	 */
	for (; idx >= 0; idx--) {
		if (lpm_cpu_mode_allow(dev->cpu, idx, false))
			break;
	}
	/* No level allowed at all: bail out without sleeping. */
	if (idx < 0) {
		pr_err("Failed suspend\n");
		return;
	}

	/*
	 * NOTE(review): cpu_prepare()/cpu_unprepare() are passed true while
	 * cluster_prepare()/cluster_unprepare() and psci_enter_sleep() are
	 * passed false — verify the differing from_idle flags are
	 * intentional for the s2idle path.
	 */
	cpu_prepare(cpu, idx, true);
	cluster_prepare(cpu->parent, cpumask, idx, false, 0);

	psci_enter_sleep(cpu, idx, false);

	/* Tear down in reverse order of preparation. */
	cluster_unprepare(cpu->parent, cpumask, idx, false, 0);
	cpu_unprepare(cpu, idx, true);
}

#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
static int cpuidle_register_cpu(struct cpuidle_driver *drv,
		struct cpumask *mask)
@@ -1482,6 +1516,8 @@ static int cluster_cpuidle_register(struct lpm_cluster *cl)
			st->power_usage = cpu_level->pwr.ss_power;
			st->target_residency = 0;
			st->enter = lpm_cpuidle_enter;
			if (i == lpm_cpu->nlevels - 1)
				st->enter_freeze = lpm_cpuidle_freeze;
		}

		lpm_cpu->drv->state_count = lpm_cpu->nlevels;
@@ -1616,14 +1652,6 @@ static int lpm_suspend_enter(suspend_state_t state)
	cpu_prepare(lpm_cpu, idx, false);
	cluster_prepare(cluster, cpumask, idx, false, 0);

	/*
	 * Print the clocks which are enabled during system suspend
	 * This debug information is useful to know which are the
	 * clocks that are enabled and preventing the system level
	 * LPMs(XO and Vmin).
	 */
	clock_debug_print_enabled(true);

	psci_enter_sleep(lpm_cpu, idx, false);

	cluster_unprepare(cluster, cpumask, idx, false, 0);
@@ -1638,6 +1666,11 @@ static const struct platform_suspend_ops lpm_suspend_ops = {
	.wake = lpm_suspend_wake,
};

/*
 * Suspend-to-idle ("freeze") platform hooks: reuse the existing
 * suspend prepare and wake callbacks rather than duplicating them.
 */
static const struct platform_freeze_ops lpm_freeze_ops = {
	.prepare = lpm_suspend_prepare,
	.restore = lpm_suspend_wake,
};

static int lpm_probe(struct platform_device *pdev)
{
	int ret;
@@ -1664,6 +1697,7 @@ static int lpm_probe(struct platform_device *pdev)
	 * how late lpm_levels gets initialized.
	 */
	suspend_set_ops(&lpm_suspend_ops);
	freeze_set_ops(&lpm_freeze_ops);
	hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_init(&histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	cluster_timer_init(lpm_root_node);