Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 239b188c authored by Abhijeet Dharmapurikar's avatar Abhijeet Dharmapurikar
Browse files

sched: use cuml demand for idle selection



Currently we choose an idle CPU if it is in a shallower C-state than
the previously found one. But if it comes across an idle CPU in the
same C-state, it skips it.

Instead of skipping it, check whether it has a lower cumulative demand
and, if so, prefer that CPU over the previously found one. However,
still prefer the task's previous CPU over a CPU with lower cumulative
demand.

Change-Id: I94b74b1b1c91d1cc7d4b5f40fb4f79558271e992
Signed-off-by: default avatarAbhijeet Dharmapurikar <adharmap@codeaurora.org>
parent f079246b
Loading
Loading
Loading
Loading
+10 −1
Original line number Original line Diff line number Diff line
@@ -7086,6 +7086,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
	unsigned long target_util = ULONG_MAX;
	unsigned long target_util = ULONG_MAX;
	unsigned long best_active_util = ULONG_MAX;
	unsigned long best_active_util = ULONG_MAX;
	unsigned long best_active_cuml_util = ULONG_MAX;
	unsigned long best_active_cuml_util = ULONG_MAX;
	unsigned long best_idle_cuml_util = ULONG_MAX;
	int best_idle_cstate = INT_MAX;
	int best_idle_cstate = INT_MAX;
	struct sched_domain *sd;
	struct sched_domain *sd;
	struct sched_group *sg;
	struct sched_group *sg;
@@ -7096,6 +7097,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
	long spare_cap, most_spare_cap = 0;
	long spare_cap, most_spare_cap = 0;
	int most_spare_cap_cpu = -1;
	int most_spare_cap_cpu = -1;
	unsigned int active_cpus_count = 0;
	unsigned int active_cpus_count = 0;
	int prev_cpu = task_cpu(p);


	*backup_cpu = -1;
	*backup_cpu = -1;


@@ -7292,12 +7294,19 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
				 * shallow idle big CPU.
				 * shallow idle big CPU.
				 */
				 */
				if (sysctl_sched_cstate_aware &&
				if (sysctl_sched_cstate_aware &&
				    best_idle_cstate <= idle_idx)
				    best_idle_cstate < idle_idx)
					continue;

				if (best_idle_cstate == idle_idx &&
					(best_idle_cpu == prev_cpu ||
					(i != prev_cpu &&
					new_util_cuml > best_idle_cuml_util)))
					continue;
					continue;


				/* Keep track of best idle CPU */
				/* Keep track of best idle CPU */
				target_capacity = capacity_orig;
				target_capacity = capacity_orig;
				best_idle_cstate = idle_idx;
				best_idle_cstate = idle_idx;
				best_idle_cuml_util = new_util_cuml;
				best_idle_cpu = i;
				best_idle_cpu = i;
				continue;
				continue;
			}
			}