
Commit ffedc6cb authored by Maulik Shah, committed by Gerrit - the friendly Code Review server

cpuidle: lpm-levels: Track and predict next rescheduling ipi



Add changes to track and predict next rescheduling ipi
based on past history. Add a module param to control enabling
it.

Change-Id: Ie495d8906288ee410708693ee15ed51643aefb44
Signed-off-by: Maulik Shah <mkshah@codeaurora.org>
parent 0f30cf7e
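
In outline, the change has two halves: the arch hooks below stamp every outgoing rescheduling IPI through a new update_ipi_history() call in smp_send_reschedule(), and the lpm-levels driver keeps the gaps between successive IPIs in a per-CPU ring buffer. When the usual residency-history prediction comes up empty, the idle-level selector runs the same deviation analysis over those IPI gaps, under a looser standard-deviation bound, to estimate when the next rescheduling IPI will arrive. Judging by their contents, the five diffs below cover the arm and arm64 IPI send paths, the devicetree parser, the lpm-levels driver core, and its header.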
+3 −0
@@ -49,6 +49,8 @@
 #include <asm/mach/arch.h>
 #include <asm/mpu.h>

+#include <soc/qcom/lpm_levels.h>
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>

@@ -719,6 +721,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)

 void smp_send_reschedule(int cpu)
 {
+	update_ipi_history(cpu);
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }

+2 −0
@@ -51,6 +51,7 @@
 #include <asm/ptrace.h>
 #include <asm/virt.h>
 #include <asm/system_misc.h>
+#include <soc/qcom/lpm_levels.h>

 #define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
@@ -985,6 +986,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)

 void smp_send_reschedule(int cpu)
 {
+	update_ipi_history(cpu);
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
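
Both smp.c call sites need <soc/qcom/lpm_levels.h> to declare update_ipi_history() unconditionally, with a no-op stub when the LPM driver is not built in. That header is not part of this diff, so the following is only a plausible sketch; the guard symbol is an assumption:

/* Hypothetical shape of the relevant part of <soc/qcom/lpm_levels.h>;
 * CONFIG_MSM_PM is assumed here, the real guard may differ. */
#if IS_ENABLED(CONFIG_MSM_PM)
void update_ipi_history(int cpu);
#else
static inline void update_ipi_history(int cpu) { }
#endif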

+3 −0
@@ -557,6 +557,9 @@ static int parse_cpu_levels(struct device_node *dn, struct lpm_cluster *c)
 	if (ret)
 		return ret;

+	cpu->ipi_prediction = !(of_property_read_bool(dn,
+					"qcom,disable-ipi-prediction"));
+
 	cpu->lpm_prediction = !(of_property_read_bool(dn,
 					"qcom,disable-prediction"));

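Note the negation: of_property_read_bool() returns true only when the property is present, so a CPU node that omits "qcom,disable-ipi-prediction" ends up with ipi_prediction = true. The new flag is an opt-out, mirroring the existing "qcom,disable-prediction" handling directly below it.
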
+99 −47
@@ -82,6 +82,9 @@ struct lpm_cluster *lpm_root_node;
 static bool lpm_prediction = true;
 module_param_named(lpm_prediction, lpm_prediction, bool, 0664);

+static bool lpm_ipi_prediction = true;
+module_param_named(lpm_ipi_prediction, lpm_ipi_prediction, bool, 0664);
+
 struct lpm_history {
 	uint32_t resi[MAXSAMPLES];
 	int mode[MAXSAMPLES];
@@ -92,8 +95,14 @@ struct lpm_history {
 	int64_t stime;
 };

-static DEFINE_PER_CPU(struct lpm_history, hist);
+struct ipi_history {
+	uint32_t interval[MAXSAMPLES];
+	uint32_t current_ptr;
+	ktime_t cpu_idle_resched_ts;
+};

+static DEFINE_PER_CPU(struct lpm_history, hist);
+static DEFINE_PER_CPU(struct ipi_history, cpu_ipi_history);
 static DEFINE_PER_CPU(struct lpm_cpu*, cpu_lpm);
 static bool suspend_in_progress;
 static DEFINE_PER_CPU(struct hrtimer, histtimer);
@@ -322,48 +331,17 @@ static void clusttimer_start(struct lpm_cluster *cluster, uint32_t time_us)
 				HRTIMER_MODE_REL_PINNED);
 }

-static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
-		struct lpm_cpu *cpu, int *idx_restrict,
-		uint32_t *idx_restrict_time)
+static uint64_t find_deviation(int *interval, uint32_t ref_stddev,
+				int64_t *stime)
 {
-	int i, j, divisor;
+	int divisor, i;
 	uint64_t max, avg, stddev;
 	int64_t thresh = LLONG_MAX;
-	struct lpm_history *history = &per_cpu(hist, dev->cpu);
-
-	if (!lpm_prediction || !cpu->lpm_prediction)
-		return 0;
-
-	/*
-	 * Samples are marked invalid when woken-up due to timer,
-	 * so donot predict.
-	 */
-	if (history->hinvalid) {
-		history->hinvalid = 0;
-		history->htmr_wkup = 1;
-		history->stime = 0;
-		return 0;
-	}
-
-	/*
-	 * Predict only when all the samples are collected.
-	 */
-	if (history->nsamp < MAXSAMPLES) {
-		history->stime = 0;
-		return 0;
-	}
-
-	/*
-	 * Check if the samples are not much deviated, if so use the
-	 * average of those as predicted sleep time. Else if any
-	 * specific mode has more premature exits return the index of
-	 * that mode.
-	 */
-
-again:
-	max = avg = divisor = stddev = 0;
-	for (i = 0; i < MAXSAMPLES; i++) {
-		int64_t value = history->resi[i];
+	do {
+		max = avg = divisor = stddev = 0;
+		for (i = 0; i < MAXSAMPLES; i++) {
+			int64_t value = interval[i];

 			if (value <= thresh) {
 				avg += value;
@@ -375,7 +353,7 @@ static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
 		do_div(avg, divisor);

 		for (i = 0; i < MAXSAMPLES; i++) {
-			int64_t value = history->resi[i];
+			int64_t value = interval[i];

 			if (value <= thresh) {
 				int64_t diff = value - avg;
@@ -391,14 +369,59 @@ static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
 	 * ignore one maximum sample and retry
 	 */
 		if (((avg > stddev * 6) && (divisor >= (MAXSAMPLES - 1)))
-				|| stddev <= cpu->ref_stddev) {
-		history->stime = ktime_to_us(ktime_get()) + avg;
+					|| stddev <= ref_stddev) {
+			*stime = ktime_to_us(ktime_get()) + avg;
 			return avg;
-	} else if (divisor  > (MAXSAMPLES - 1)) {
+		}
 		thresh = max - 1;
-		goto again;
-	}
+
+	} while (divisor > (MAXSAMPLES - 1));
+
+	return 0;
+}
+
+static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
+		struct lpm_cpu *cpu, int *idx_restrict,
+		uint32_t *idx_restrict_time, uint32_t *ipi_predicted)
+{
+	int i, j;
+	uint64_t avg;
+	struct lpm_history *history = &per_cpu(hist, dev->cpu);
+	struct ipi_history *ipi_history = &per_cpu(cpu_ipi_history, dev->cpu);
+
+	if (!lpm_prediction || !cpu->lpm_prediction)
+		return 0;
+
+	/*
+	 * Samples are marked invalid when woken-up due to timer,
+	 * so donot predict.
+	 */
+	if (history->hinvalid) {
+		history->hinvalid = 0;
+		history->htmr_wkup = 1;
+		history->stime = 0;
+		return 0;
+	}
+
+	/*
+	 * Predict only when all the samples are collected.
+	 */
+	if (history->nsamp < MAXSAMPLES) {
+		history->stime = 0;
+		return 0;
+	}
+
+	/*
+	 * Check if the samples are not much deviated, if so use the
+	 * average of those as predicted sleep time. Else if any
+	 * specific mode has more premature exits return the index of
+	 * that mode.
+	 */
+
+	avg = find_deviation(history->resi, cpu->ref_stddev, &(history->stime));
+	if (avg)
+		return avg;

 	/*
 	 * Find the number of premature exits for each of the mode,
 	 * excluding clockgating mode, and they are more than fifty
@@ -440,6 +463,18 @@ static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
 			}
 		}
 	}
+
+	if (*idx_restrict_time || !cpu->ipi_prediction || !lpm_ipi_prediction)
+		return 0;
+
+	avg = find_deviation(ipi_history->interval, cpu->ref_stddev
+						+ DEFAULT_IPI_STDDEV,
+						&(history->stime));
+	if (avg) {
+		*ipi_predicted = 1;
+		return avg;
+	}
+
 	return 0;
 }
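
Pulled out of the diff for readability, find_deviation() is an outlier-trimming estimator: take the mean of the last MAXSAMPLES intervals, accept it as the prediction when the spread is tight, otherwise drop the largest sample and retry until too few samples remain. The residency history is checked against cpu->ref_stddev, while the IPI history gets the looser cpu->ref_stddev + DEFAULT_IPI_STDDEV bound. A self-contained userspace restatement (the MAXSAMPLES value and the kernel's int_sqrt() normalization, which falls between the hunks shown, are assumptions):

#include <stdint.h>
#include <math.h>
#include <stdio.h>

#define MAXSAMPLES 5	/* assumed; the real value lives in lpm-levels.h */

/*
 * Userspace restatement of find_deviation(): take the mean of the
 * samples; if they cluster tightly (stddev within ref_stddev, or the
 * mean dominating stddev with at most one sample dropped), accept the
 * mean as the prediction. Otherwise discard the largest sample and retry.
 */
static int64_t predict(const uint32_t *interval, uint32_t ref_stddev)
{
	int64_t thresh = INT64_MAX;
	int divisor, i;

	do {
		int64_t max = 0, avg = 0, stddev = 0;

		divisor = 0;
		for (i = 0; i < MAXSAMPLES; i++) {
			int64_t value = interval[i];

			if (value <= thresh) {
				avg += value;
				divisor++;
				if (value > max)
					max = value;
			}
		}
		avg /= divisor;

		for (i = 0; i < MAXSAMPLES; i++) {
			int64_t value = interval[i];

			if (value <= thresh) {
				int64_t diff = value - avg;

				stddev += diff * diff;
			}
		}
		stddev = (int64_t)sqrt((double)stddev / divisor);

		if (((avg > stddev * 6) && (divisor >= MAXSAMPLES - 1)) ||
		    stddev <= ref_stddev)
			return avg;	/* samples agree: predict the mean */

		thresh = max - 1;	/* trim the outlier and try again */
	} while (divisor > MAXSAMPLES - 1);

	return 0;			/* too scattered to predict */
}

int main(void)
{
	const uint32_t steady[MAXSAMPLES] = { 4000, 4100, 3900, 4050, 3950 };
	const uint32_t noisy[MAXSAMPLES]  = { 4000, 40000, 3900, 4100, 56000 };

	printf("steady: %lld us\n", (long long)predict(steady, 100));
	printf("noisy:  %lld us\n", (long long)predict(noisy, 100));
	return 0;
}

With the steady pattern the mean of 4000 us is accepted on the first pass; with the noisy one the outliers keep the deviation too high even after one trim, and the function returns 0, i.e. no prediction.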
@@ -519,7 +554,7 @@ static int cpu_power_select(struct cpuidle_device *dev,
 	int i, idx_restrict;
 	uint32_t lvl_latency_us = 0;
 	uint64_t predicted = 0;
-	uint32_t htime = 0, idx_restrict_time = 0;
+	uint32_t htime = 0, idx_restrict_time = 0, ipi_predicted = 0;
 	uint32_t next_wakeup_us = (uint32_t)sleep_us;
 	uint32_t min_residency, max_residency;
 	struct power_params *pwr_params;
@@ -549,7 +584,8 @@ static int cpu_power_select(struct cpuidle_device *dev,
 			 */
 			if (next_wakeup_us > max_residency) {
 				predicted = lpm_cpuidle_predict(dev, cpu,
-					&idx_restrict, &idx_restrict_time);
+					&idx_restrict, &idx_restrict_time,
+					&ipi_predicted);
 				if (predicted && (predicted < min_residency))
 					predicted = min_residency;
 			} else
@@ -578,7 +614,9 @@ static int cpu_power_select(struct cpuidle_device *dev,
 	if ((predicted || (idx_restrict != cpu->nlevels + 1)) &&
 	    (best_level < (cpu->nlevels-1))) {
 		htime = predicted + cpu->tmr_add;
-		if (htime == cpu->tmr_add)
+		if (lpm_ipi_prediction && cpu->ipi_prediction)
+			htime += DEFAULT_IPI_TIMER_ADD;
+		if (!predicted)
 			htime = idx_restrict_time;
 		else if (htime > max_residency)
 			htime = max_residency;
@@ -591,8 +629,8 @@ static int cpu_power_select(struct cpuidle_device *dev,
 done_select:
 	trace_cpu_power_select(best_level, sleep_us, latency_us, 0);

-	trace_cpu_pred_select(idx_restrict_time ? 2 : (predicted ? 1 : 0),
-			predicted, htime);
+	trace_cpu_pred_select(idx_restrict_time ? 2 : (ipi_predicted ?
+				3 : (predicted ? 1 : 0)), predicted, htime);

 	return best_level;
 }
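
Two behavioural details worth noting in the hunks above. First, the history-timer padding: with IPI prediction active the timer is armed at predicted + cpu->tmr_add + DEFAULT_IPI_TIMER_ADD, so assuming the header defaults below (a tmr_add of 100 us and DEFAULT_IPI_TIMER_ADD of 900 us), a 3000 us prediction arms the timer at 4000 us, still clamped to the level's max_residency; when nothing was predicted the timer takes idx_restrict_time instead. Second, the trace point now encodes the prediction source: 0 for none, 1 for residency history, 2 for a restricted index, and 3 for an IPI-interval prediction.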
@@ -1217,6 +1255,20 @@ static int lpm_cpuidle_select(struct cpuidle_driver *drv,
 	return cpu_power_select(dev, cpu);
 }

+void update_ipi_history(int cpu)
+{
+	struct ipi_history *history = &per_cpu(cpu_ipi_history, cpu);
+	ktime_t now = ktime_get();
+
+	history->interval[history->current_ptr] =
+			ktime_to_us(ktime_sub(now,
+			history->cpu_idle_resched_ts));
+	(history->current_ptr)++;
+	if (history->current_ptr >= MAXSAMPLES)
+		history->current_ptr = 0;
+	history->cpu_idle_resched_ts = now;
+}
+
 static void update_history(struct cpuidle_device *dev, int idx)
 {
 	struct lpm_history *history = &per_cpu(hist, dev->cpu);
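
update_ipi_history() is the producer half: on every rescheduling IPI it records the gap since the previous one and advances a wrapping index, so the buffer always holds the most recent MAXSAMPLES inter-IPI intervals. A toy harness showing the wraparound (userspace; the MAXSAMPLES value is an assumption):

#include <stdio.h>
#include <stdint.h>

#define MAXSAMPLES 5	/* assumed; the real value lives in lpm-levels.h */

struct ipi_history {
	uint32_t interval[MAXSAMPLES];
	uint32_t current_ptr;
	uint64_t last_ts_us;	/* stands in for cpu_idle_resched_ts */
};

/* Mirrors update_ipi_history(): store the inter-IPI gap, advance the
 * ring pointer, remember the timestamp for the next delta. */
static void record_ipi(struct ipi_history *h, uint64_t now_us)
{
	h->interval[h->current_ptr] = (uint32_t)(now_us - h->last_ts_us);
	if (++h->current_ptr >= MAXSAMPLES)
		h->current_ptr = 0;
	h->last_ts_us = now_us;
}

int main(void)
{
	struct ipi_history h = { 0 };
	uint64_t t = 0;
	int i;

	for (i = 0; i < 8; i++)		/* 8 IPIs, 4 ms apart */
		record_ipi(&h, t += 4000);

	for (i = 0; i < MAXSAMPLES; i++)
		printf("interval[%d] = %u us\n", i, h.interval[i]);
	/* Every slot converges on 4000; a tight cluster like this is
	 * exactly what find_deviation() turns into a prediction. */
	return 0;
}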
+4 −1
 /* SPDX-License-Identifier: GPL-2.0-only */

 /*
- * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
  */

 #ifndef __LPM_LEVELS_H__
@@ -14,7 +14,9 @@
 #define CLUST_SMPL_INVLD_TIME 40000
 #define DEFAULT_PREMATURE_CNT 3
 #define DEFAULT_STDDEV 100
+#define DEFAULT_IPI_STDDEV 400
 #define DEFAULT_TIMER_ADD 100
+#define DEFAULT_IPI_TIMER_ADD 900
 #define TIMER_ADD_LOW 100
 #define TIMER_ADD_HIGH 1500
 #define STDDEV_LOW 100
@@ -48,6 +50,7 @@ struct lpm_cpu {
 	uint32_t ref_premature_cnt;
 	uint32_t tmr_add;
 	bool lpm_prediction;
+	bool ipi_prediction;
 	struct cpuidle_driver *drv;
 	struct lpm_cluster *parent;
 	ktime_t next_hrtimer;
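
Since both module_param_named() hooks above use mode 0664, the two predictors can also be toggled at runtime through sysfs, presumably under /sys/module/lpm_levels/parameters/ given the driver name, which makes it easy to compare idle behaviour with and without the IPI predictor on a live device.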