Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3cd03903 authored by Joonwoo Park's avatar Joonwoo Park
Browse files

sched: add option whether CPU C-state is used to guide task placement



Some CPUs have no obvious exit-latency penalty when leaving a low-power
C-state.  Add a new Kconfig option, CONFIG_SCHED_HMP_CSTATE_AWARE, which
controls whether CPU C-state is used to guide task placement.

CRs-fixed: 1006303
Change-Id: Ie8dbab8e173c3a1842d922f4d1fbd8cc4221789c
Signed-off-by: default avatarJoonwoo Park <joonwoop@codeaurora.org>
parent 182276b6
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -1164,6 +1164,14 @@ config SCHED_HMP
	  in their instructions per-cycle capability or the maximum
	  frequency they can attain.

config SCHED_HMP_CSTATE_AWARE
	bool "CPU C-state aware scheduler"
	depends on SCHED_HMP
	help
	  This feature lets the HMP scheduler optimize task placement
	  based on CPU C-states.  When enabled, the scheduler places
	  tasks onto the CPU in the shallowest C-state among the most
	  power-efficient CPUs.

config SCHED_QHMP
	bool "QHMP scheduler extensions"
	depends on SCHED_HMP
+47 −19
Original line number Diff line number Diff line
@@ -3148,28 +3148,13 @@ next_best_cluster(struct sched_cluster *cluster, struct cpu_select_env *env,
	return next;
}

/*
 * NOTE(review): this span is a web-rendered unified diff hunk — removed
 * (pre-patch) and added (post-patch) lines are interleaved with no +/-
 * markers, and the body is truncated by the "@@" hunk header below, so
 * it is not compilable as shown.
 */
static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
					 struct cpu_select_env *env)
/*
 * NOTE(review): Kconfig symbols are visible to C code only with a CONFIG_
 * prefix; this guard should almost certainly read
 * "#ifdef CONFIG_SCHED_HMP_CSTATE_AWARE".  As written, the macro is never
 * defined and this C-state-aware path would never be compiled in — confirm
 * against the build and follow-up commits.
 */
#ifdef SCHED_HMP_CSTATE_AWARE
static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
				   struct cpu_select_env *env, int cpu_cost)
{
	/* Old (removed) declaration, shown by the diff renderer: */
	int cpu_cost, cpu_cstate;
	int cpu_cstate;
	int prev_cpu = env->prev_cpu;

	/*
	 * The lines below up to the blank line are the pre-patch body that
	 * this commit hoists into the new update_cluster_stats() wrapper
	 * (power-cost gating and best-sibling tracking).
	 */
	cpu_cost = power_cost(cpu, task_load(env->p) +
				cpu_cravg_sync(cpu, env->sync));
	if (cpu_cost > stats->min_cost)
		return;

	if (cpu != prev_cpu && cpus_share_cache(prev_cpu, cpu)) {
		if (stats->best_sibling_cpu_cost > cpu_cost ||
		    (stats->best_sibling_cpu_cost == cpu_cost &&
		     stats->best_sibling_cpu_load > env->cpu_load)) {

			stats->best_sibling_cpu_cost = cpu_cost;
			stats->best_sibling_cpu_load = env->cpu_load;
			stats->best_sibling_cpu = cpu;
		}
	}

	/* New C-state-aware path: read the candidate CPU's current C-state. */
	cpu_cstate = cpu_rq(cpu)->cstate;

	if (env->need_idle) {
/* NOTE(review): function body elided here by the diff (next hunk header). */
@@ -3224,6 +3209,49 @@ static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
		stats->best_cpu = cpu;
	}
}
/*
 * NOTE(review): the comment tag should mirror the corrected guard, i.e.
 * "#else" of CONFIG_SCHED_HMP_CSTATE_AWARE — see the matching #ifdef above,
 * which appears to be missing the CONFIG_ prefix.
 */
#else /* SCHED_HMP_CSTATE_AWARE */
/*
 * Fallback candidate-selection helper used when C-state awareness is
 * compiled out: rank @cpu against the best candidates recorded in @stats
 * using only power cost (@cpu_cost) and load (env->cpu_load).
 *
 * Caller (update_cluster_stats) only invokes this when
 * cpu_cost <= stats->min_cost.
 */
static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
				   struct cpu_select_env *env, int cpu_cost)
{
	int prev_cpu = env->prev_cpu;

	/*
	 * Track the cheapest / least-loaded CPU that shares a cache with the
	 * task's previous CPU (cache-affine wakeup candidate), excluding
	 * prev_cpu itself.
	 */
	if (cpu != prev_cpu && cpus_share_cache(prev_cpu, cpu)) {
		if (stats->best_sibling_cpu_cost > cpu_cost ||
		    (stats->best_sibling_cpu_cost == cpu_cost &&
		     stats->best_sibling_cpu_load > env->cpu_load)) {
			stats->best_sibling_cpu_cost = cpu_cost;
			stats->best_sibling_cpu_load = env->cpu_load;
			stats->best_sibling_cpu = cpu;
		}
	}

	/*
	 * Accept @cpu as the new best candidate when it is strictly cheaper,
	 * or when it ties on cost (caller guarantees cpu_cost <= min_cost)
	 * and either beats the current best on load (unless that best is
	 * prev_cpu) or is the task's previous CPU itself.
	 */
	if ((cpu_cost < stats->min_cost) ||
	    ((stats->best_cpu != prev_cpu &&
	      stats->min_load > env->cpu_load) || cpu == prev_cpu)) {
		if (env->need_idle) {
			/* Caller wants an idle CPU: only record idle ones. */
			if (idle_cpu(cpu)) {
				stats->min_cost = cpu_cost;
				stats->best_idle_cpu = cpu;
			}
		} else {
			stats->min_cost = cpu_cost;
			stats->min_load = env->cpu_load;
			stats->best_cpu = cpu;
		}
	}
}
#endif

/*
 * Fold @cpu into @stats as a placement candidate for the waking task.
 *
 * The power cost of running the task on @cpu is estimated from the task's
 * load plus the CPU's sync-adjusted running average; candidates that are
 * already strictly more expensive than the cheapest CPU seen so far are
 * discarded before reaching __update_cluster_stats().
 */
static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
					 struct cpu_select_env *env)
{
	int cost = power_cost(cpu, task_load(env->p) +
			      cpu_cravg_sync(cpu, env->sync));

	if (cost > stats->min_cost)
		return;

	__update_cluster_stats(cpu, stats, env, cost);
}

static void find_best_cpu_in_cluster(struct sched_cluster *c,
	 struct cpu_select_env *env, struct cluster_cpu_stats *stats)