
Commit c4a41dd9 authored by Ionela Voinescu, committed by Abhijeet Dharmapurikar

ANDROID: sched/fair: unify spare capacity calculation



Several sites calculate the spare capacity of a CPU as the difference
between the CPU's original capacity and its computed new utilization.
Unify the calculation by computing that value once into a local
spare_cap variable and reusing it.

Change-Id: I78daece7543f78d4f74edbee5e9ceb62908af507
Signed-off-by: Ionela Voinescu <ionela.voinescu@arm.com>
Git-commit: 5383285d
Git-repo: http://android.googlesource.com/kernel/common


[adharmap@codeaurora.org: spare_cap is already used in the 4.14
baseline; rename it to spare_wake_cap to avoid a name collision]
Signed-off-by: Abhijeet Dharmapurikar <adharmap@codeaurora.org>
parent 74bdb76f
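
For readers outside the kernel tree, the sketch below is a minimal,
standalone C illustration of the pattern this diff applies, not kernel
code: the pre-existing spare_cap (headroom before the waking task is
placed) becomes spare_wake_cap, and the repeated
capacity_orig - new_util expression is computed once into a local
spare_cap and reused. The struct cpu type, the pick_max_spare_cpu()
helper, and the toy values in main() are assumptions made for
illustration only.

/*
 * Minimal standalone sketch of the two spare-capacity notions the
 * patch separates. Names mirror the patch; values are illustrative.
 */
#include <stdio.h>

struct cpu {
	unsigned long capacity_orig;	/* original (max) capacity */
	unsigned long util;		/* current utilization */
};

static long pick_max_spare_cpu(const struct cpu *cpus, int n,
			       unsigned long task_util)
{
	long most_spare_wake_cap = 0;	/* headroom before enqueueing */
	long target_max_spare_cap = 0;	/* headroom after enqueueing */
	int most_spare_cap_cpu = -1, target_cpu = -1;

	for (int i = 0; i < n; i++) {
		unsigned long wake_util = cpus[i].util;
		unsigned long new_util = wake_util + task_util;
		/* spare_wake_cap: capacity the CPU has free right now */
		long spare_wake_cap = cpus[i].capacity_orig - wake_util;
		/*
		 * spare_cap: capacity left once the task runs here;
		 * computed once and reused, as in the patch.
		 */
		long spare_cap = cpus[i].capacity_orig - new_util;

		if (spare_wake_cap > most_spare_wake_cap) {
			most_spare_wake_cap = spare_wake_cap;
			most_spare_cap_cpu = i;
		}
		if (new_util > cpus[i].capacity_orig)
			continue;	/* task would not fit */
		if (spare_cap > target_max_spare_cap) {
			target_max_spare_cap = spare_cap;
			target_cpu = i;
		}
	}
	/* fall back to the most-spare CPU if the task fits nowhere */
	return target_cpu >= 0 ? target_cpu : most_spare_cap_cpu;
}

int main(void)
{
	struct cpu cpus[] = { { 1024, 800 }, { 512, 100 }, { 1024, 300 } };

	printf("best cpu: %ld\n", pick_max_spare_cpu(cpus, 3, 200));
	return 0;
}

Built with any C99 compiler, the example picks CPU 2, which keeps the
most capacity free once the task's utilization is added.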
kernel/sched/fair.c +17 −9
@@ -7107,7 +7107,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 	int best_idle_cpu = -1;
 	int target_cpu = -1;
 	int cpu, i;
-	long spare_cap, most_spare_cap = 0;
+	long spare_wake_cap, most_spare_wake_cap = 0;
 	int most_spare_cap_cpu = -1;
 	unsigned int active_cpus_count = 0;
 	int prev_cpu = task_cpu(p);
@@ -7151,6 +7151,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 			unsigned long capacity_curr = capacity_curr_of(i);
 			unsigned long capacity_orig = capacity_orig_of(i);
 			unsigned long wake_util, new_util, new_util_cuml;
+			long spare_cap;
 
 			trace_sched_cpu_util(i);
 
@@ -7175,10 +7176,10 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 			 */
 			wake_util = cpu_util_wake(i, p);
 			new_util = wake_util + task_util(p);
-			spare_cap = capacity_orig_of(i) - wake_util;
+			spare_wake_cap = capacity_orig_of(i) - wake_util;
 
-			if (spare_cap > most_spare_cap) {
-				most_spare_cap = spare_cap;
+			if (spare_wake_cap > most_spare_wake_cap) {
+				most_spare_wake_cap = spare_wake_cap;
 				most_spare_cap_cpu = i;
 			}
@@ -7202,6 +7203,13 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 			if (new_util > capacity_orig)
 				continue;
 
+			/*
+			 * Pre-compute the maximum possible capacity we expect
+			 * to have available on this CPU once the task is
+			 * enqueued here.
+			 */
+			spare_cap = capacity_orig - new_util;
+
 			/*
 			 * Case A) Latency sensitive tasks
 			 *
@@ -7250,9 +7258,9 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 				 * Case A.2: Target ACTIVE CPU
 				 * Favor CPUs with max spare capacity.
 				 */
-				if ((capacity_curr > new_util) &&
-					(capacity_orig - new_util > target_max_spare_cap)) {
-					target_max_spare_cap = capacity_orig - new_util;
+				if (capacity_curr > new_util &&
+				    spare_cap > target_max_spare_cap) {
+					target_max_spare_cap = spare_cap;
 					target_cpu = i;
 					continue;
 				}
@@ -7374,10 +7382,10 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 
 			/* Favor CPUs with maximum spare capacity */
 			if (capacity_orig >= target_capacity &&
-			    (capacity_orig - new_util) < target_max_spare_cap)
+			    spare_cap < target_max_spare_cap)
 				continue;
 
-			target_max_spare_cap = capacity_orig - new_util;
+			target_max_spare_cap = spare_cap;
 			target_capacity = capacity_orig;
 			target_util = new_util;
 			target_cpu = i;