Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2c212faa authored by Srinivas Rao L's avatar Srinivas Rao L Committed by Maulik Shah
Browse files

cpuidle: lpm_levels: Wakeup biased cpu



If a biased cpu entered the shallowest LPM state and
there are no wakeups for it, it can stay in the shallowest
state for a long time.

Program a wakeup for the biased CPU so that it wakes up after the
expected bias window has completed, allowing the cpu to then
enter a deeper state.

Change-Id: Ic92c779f0f8b1fa85aa8b3afa68d075f8d5d7dd6
Signed-off-by: default avatarSrinivas Rao L <lsrao@codeaurora.org>
parent 51baf5ad
Loading
Loading
Loading
Loading
+50 −5
Original line number Diff line number Diff line
@@ -106,6 +106,7 @@ static DEFINE_PER_CPU(struct ipi_history, cpu_ipi_history);
static DEFINE_PER_CPU(struct lpm_cpu*, cpu_lpm);
static bool suspend_in_progress;
static DEFINE_PER_CPU(struct hrtimer, histtimer);
/*
 * Per-CPU timer armed for a scheduler-biased CPU parked in the shallowest
 * idle state, so it gets a wakeup once the bias window ends and can
 * re-evaluate and enter a deeper low-power state.
 */
static DEFINE_PER_CPU(struct hrtimer, biastimer);
static struct lpm_debug *lpm_debug;
static phys_addr_t lpm_debug_phys;
static const int num_dbg_elements = 0x100;
@@ -331,6 +332,34 @@ static void clusttimer_start(struct lpm_cluster *cluster, uint32_t time_us)
				HRTIMER_MODE_REL_PINNED);
}

static void biastimer_cancel(void)
{
	unsigned int cpu = raw_smp_processor_id();
	struct hrtimer *cpu_biastimer = &per_cpu(biastimer, cpu);
	ktime_t time_rem;

	time_rem = hrtimer_get_remaining(cpu_biastimer);
	if (ktime_to_us(time_rem) <= 0)
		return;

	hrtimer_try_to_cancel(cpu_biastimer);
}

/*
 * Expiry handler for the per-CPU bias timer. The wakeup itself is the
 * whole point — nothing to do here, and the timer is never re-armed.
 */
static enum hrtimer_restart biastimer_fn(struct hrtimer *timer)
{
	return HRTIMER_NORESTART;
}

static void biastimer_start(uint32_t time_ns)
{
	ktime_t bias_ktime = ns_to_ktime(time_ns);
	unsigned int cpu = raw_smp_processor_id();
	struct hrtimer *cpu_biastimer = &per_cpu(biastimer, cpu);

	cpu_biastimer->function = biastimer_fn;
	hrtimer_start(cpu_biastimer, bias_ktime, HRTIMER_MODE_REL_PINNED);
}

static uint64_t find_deviation(int *interval, uint32_t ref_stddev,
				int64_t *stime)
{
@@ -517,15 +546,23 @@ static void clear_predict_history(void)

static void update_history(struct cpuidle_device *dev, int idx);

/*
 * lpm_disallowed() - decide whether @cpu must be kept out of low-power
 * modes for this idle entry.
 *
 * NOTE(review): the diff rendering interleaved the pre-patch and
 * post-patch lines of this function (duplicate signature, stale
 * conditions); this is the coherent post-patch version.
 *
 * Isolated CPUs are always allowed into LPM. Otherwise LPM is disallowed
 * when idle sleep is globally disabled, when the projected sleep time is
 * negative, or when the scheduler reports a bias window for @cpu — in the
 * last case the window length is recorded in @pm_cpu->bias so that
 * psci_enter_sleep() can arm a wakeup timer for the end of the window.
 *
 * Returns true when LPM must be skipped, false when it is permitted.
 */
static inline bool lpm_disallowed(s64 sleep_us, int cpu, struct lpm_cpu *pm_cpu)
{
	uint64_t bias_time = 0;

	if (check_cpu_isolated(cpu))
		goto out;

	if (sleep_disabled || sleep_us < 0)
		return true;

	/*
	 * Non-zero means the scheduler biased this CPU: remember the bias
	 * window for the wakeup timer and disallow deeper LPM for now.
	 */
	bias_time = sched_lpm_disallowed_time(cpu);
	if (bias_time) {
		pm_cpu->bias = bias_time;
		return true;
	}

out:
	return false;
}

@@ -559,7 +596,7 @@ static int cpu_power_select(struct cpuidle_device *dev,
	uint32_t min_residency, max_residency;
	struct power_params *pwr_params;

	if (lpm_disallowed(sleep_us, dev->cpu))
	if (lpm_disallowed(sleep_us, dev->cpu, cpu))
		goto done_select;

	idx_restrict = cpu->nlevels + 1;
@@ -1210,6 +1247,8 @@ static int psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle)
	 */

	if (!idx) {
		if (cpu->bias)
			biastimer_start(cpu->bias);
		stop_critical_timings();
		cpu_do_idle();
		start_critical_timings();
@@ -1341,6 +1380,10 @@ static int lpm_cpuidle_enter(struct cpuidle_device *dev,
		histtimer_cancel();
		clusttimer_cancel();
	}
	if (cpu->bias) {
		biastimer_cancel();
		cpu->bias = 0;
	}
	local_irq_enable();
	return idx;
}
@@ -1634,6 +1677,8 @@ static int lpm_probe(struct platform_device *pdev)
	for_each_possible_cpu(cpu) {
		cpu_histtimer = &per_cpu(histtimer, cpu);
		hrtimer_init(cpu_histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cpu_histtimer = &per_cpu(biastimer, cpu);
		hrtimer_init(cpu_histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	}

	cluster_timer_init(lpm_root_node);
+1 −0
Original line number Diff line number Diff line
@@ -51,6 +51,7 @@ struct lpm_cpu {
	uint32_t tmr_add;
	bool lpm_prediction;
	bool ipi_prediction;
	uint64_t bias;
	struct cpuidle_driver *drv;
	struct lpm_cluster *parent;
	ktime_t next_hrtimer;