
Commit bc911ce2 authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server

Merge "ANDROID: sched/fair: return idle CPU immediately for prefer_idle"

parents d51cdc85 18eaaf28
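
In short (editor's gloss, not part of the commit message): before this change, find_best_target() returned the first idle CPU it encountered for a prefer_idle task. It now scans all candidates, tracks the best one in best_idle_cpu, and the caller, find_energy_efficient_cpu(), places the task there immediately, skipping the energy-diff evaluation. A rough sketch of the resulting caller-side flow (simplified, not compilable kernel code; choose_cpu_sketch and pick_lowest_energy_cpu are illustrative stand-ins):

	/* Sketch: caller-side flow after this merge (simplified). */
	static int choose_cpu_sketch(struct task_struct *p, bool boosted,
				     bool prefer_idle)
	{
		int backup_cpu = -1;
		int target_cpu = find_best_target(p, &backup_cpu, boosted,
						  prefer_idle, &fbt_env);

		/* New fast path: a prefer_idle task whose pick is idle. */
		if (prefer_idle && target_cpu >= 0 && idle_cpu(target_cpu))
			return target_cpu;

		/* Otherwise the usual EAS energy comparison decides. */
		return pick_lowest_energy_cpu(p, target_cpu, backup_cpu);
	}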
+79 −23
@@ -7107,13 +7107,25 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 	int best_idle_cpu = -1;
 	int target_cpu = -1;
 	int cpu, i;
-	long spare_cap, most_spare_cap = 0;
+	long spare_wake_cap, most_spare_wake_cap = 0;
 	int most_spare_cap_cpu = -1;
 	unsigned int active_cpus_count = 0;
 	int prev_cpu = task_cpu(p);
 
 	*backup_cpu = -1;
 
+	/*
+	 * In most cases, target_capacity tracks capacity_orig of the most
+	 * energy efficient CPU candidate, thus requiring to minimise
+	 * target_capacity. For these cases target_capacity is already
+	 * initialized to ULONG_MAX.
+	 * However, for prefer_idle and boosted tasks we look for a high
+	 * performance CPU, thus requiring to maximise target_capacity. In this
+	 * case we initialise target_capacity to 0.
+	 */
+	if (prefer_idle && boosted)
+		target_capacity = 0;
+
 	/* Find start CPU based on boost value */
 	cpu = start_cpu(p, boosted, fbt_env->rtg_target);
 	if (cpu < 0)
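
A note on the hunk above (editor's gloss): target_capacity doubles as the running best capacity_orig among idle candidates, so its initial value has to match the direction of the comparison:

	/* Sketch: initial value pairs with the search direction. */
	unsigned long target_capacity = ULONG_MAX;	/* minimising (default) */
	if (prefer_idle && boosted)
		target_capacity = 0;			/* maximising (boosted) */

An idle big core then beats an idle little core for boosted tasks, and the reverse holds for unboosted ones.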
@@ -7151,6 +7163,8 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 			unsigned long capacity_curr = capacity_curr_of(i);
 			unsigned long capacity_orig = capacity_orig_of(i);
 			unsigned long wake_util, new_util, new_util_cuml;
+			long spare_cap;
+			int idle_idx = INT_MAX;
 
 			trace_sched_cpu_util(i);
 
@@ -7175,10 +7189,10 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 			 */
 			wake_util = cpu_util_wake(i, p);
 			new_util = wake_util + task_util(p);
-			spare_cap = capacity_orig_of(i) - wake_util;
+			spare_wake_cap = capacity_orig_of(i) - wake_util;
 
-			if (spare_cap > most_spare_cap) {
-				most_spare_cap = spare_cap;
+			if (spare_wake_cap > most_spare_wake_cap) {
+				most_spare_wake_cap = spare_wake_cap;
 				most_spare_cap_cpu = i;
 			}
 
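
The rename above is preparation: spare_cap is about to be reused for a different quantity in the next hunk. Roughly (editor's paraphrase):

	/* Sketch: the two spare-capacity notions after this change. */
	spare_wake_cap = capacity_orig_of(i) - wake_util;	/* task absent   */
	spare_cap      = capacity_orig_of(i) - new_util;	/* task enqueued */

spare_wake_cap still feeds the most_spare_cap_cpu fallback, while spare_cap now drives the max-spare-capacity target selection below.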
@@ -7202,6 +7216,17 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 			if (new_util > capacity_orig)
 				continue;
 
+			/*
+			 * Pre-compute the maximum possible capacity we expect
+			 * to have available on this CPU once the task is
+			 * enqueued here.
+			 */
+			spare_cap = capacity_orig - new_util;
+
+			if (idle_cpu(i))
+				idle_idx = idle_get_state_idx(cpu_rq(i));
+
+
 			/*
 			 * Case A) Latency sensitive tasks
 			 *
@@ -7234,25 +7259,39 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,

 				/*
 				 * Case A.1: IDLE CPU
-				 * Return the first IDLE CPU we find.
+				 * Return the best IDLE CPU we find:
+				 * - for boosted tasks: the CPU with the highest
+				 * performance (i.e. biggest capacity_orig)
+				 * - for !boosted tasks: the most energy
+				 * efficient CPU (i.e. smallest capacity_orig)
 				 */
 				if (idle_cpu(i)) {
-					trace_sched_find_best_target(p,
-							prefer_idle, min_util,
-							cpu, best_idle_cpu,
-							best_active_cpu,
-							-1, i, -1);
+					if (boosted &&
+					    capacity_orig < target_capacity)
+						continue;
+					if (!boosted &&
+					    capacity_orig > target_capacity)
+						continue;
+					if (capacity_orig == target_capacity &&
+					    sysctl_sched_cstate_aware &&
+					    best_idle_cstate <= idle_idx)
+						continue;
 
-					return i;
+					target_capacity = capacity_orig;
+					best_idle_cstate = idle_idx;
+					best_idle_cpu = i;
+					continue;
 				}
+				if (best_idle_cpu != -1)
+					continue;
 
 				/*
 				 * Case A.2: Target ACTIVE CPU
 				 * Favor CPUs with max spare capacity.
 				 */
-				if ((capacity_curr > new_util) &&
-					(capacity_orig - new_util > target_max_spare_cap)) {
-					target_max_spare_cap = capacity_orig - new_util;
+				if (capacity_curr > new_util &&
+				    spare_cap > target_max_spare_cap) {
+					target_max_spare_cap = spare_cap;
 					target_cpu = i;
 					continue;
 				}
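
The new Case A.1 can be read as a predicate (a condensed sketch, with the same variables as the diff; the helper name is mine):

	/* Sketch: does idle CPU i displace the current best idle pick? */
	static bool beats_best_idle(bool boosted, unsigned long capacity_orig,
				    unsigned long target_capacity,
				    int idle_idx, int best_idle_cstate)
	{
		if (boosted && capacity_orig < target_capacity)
			return false;	/* boosted: keep the bigger CPU */
		if (!boosted && capacity_orig > target_capacity)
			return false;	/* !boosted: keep the smaller CPU */
		if (capacity_orig == target_capacity &&
		    sysctl_sched_cstate_aware &&
		    best_idle_cstate <= idle_idx)
			return false;	/* tie: keep the shallower C-state */
		return true;
	}

Note also that once any idle CPU has been recorded, the added "if (best_idle_cpu != -1) continue;" stops active CPUs from competing in the cases that follow.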
@@ -7318,8 +7357,6 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 			 * consumptions without affecting performance.
 			 */
 			if (idle_cpu(i)) {
-				int idle_idx = idle_get_state_idx(cpu_rq(i));
-
 				/*
 				 * Skip CPUs in deeper idle state, but only
 				 * if they are also less energy efficient.
@@ -7374,10 +7411,10 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,

 			/* Favor CPUs with maximum spare capacity */
 			if (capacity_orig >= target_capacity &&
-			    (capacity_orig - new_util) < target_max_spare_cap)
+			    spare_cap < target_max_spare_cap)
 				continue;
 
-			target_max_spare_cap = capacity_orig - new_util;
+			target_max_spare_cap = spare_cap;
 			target_capacity = capacity_orig;
 			target_util = new_util;
 			target_cpu = i;
@@ -7413,7 +7450,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 	 *
 	 * - prefer_idle tasks:
 	 *
-	 *   a) IDLE CPU available, we return immediately
+	 *   a) IDLE CPU available: best_idle_cpu
 	 *   b) ACTIVE CPU where task fits and has the bigger maximum spare
 	 *      capacity (i.e. target_cpu)
 	 *   c) ACTIVE CPU with less contention due to other tasks
@@ -7424,6 +7461,15 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 	 *   a) ACTIVE CPU: target_cpu
 	 *   b) IDLE CPU: best_idle_cpu
 	 */
+
+	if (prefer_idle && (best_idle_cpu != -1)) {
+		trace_sched_find_best_target(p, prefer_idle, min_util, cpu,
+					     best_idle_cpu, best_active_cpu,
+					     -1, best_idle_cpu, -1);
+
+		return best_idle_cpu;
+	}
+
 	if (target_cpu == -1)
 		target_cpu = prefer_idle
 			? best_active_cpu
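
Combined with the updated comment, the prefer_idle resolution order now ends up as, roughly (editor's sketch, ignoring the !prefer_idle leg):

	/* Sketch: prefer_idle choice order after this change. */
	if (best_idle_cpu != -1)
		return best_idle_cpu;	/* a) best idle CPU             */
	if (target_cpu != -1)
		return target_cpu;	/* b) max spare-capacity active */
	return best_active_cpu;		/* c) least-contended active    */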
@@ -7673,6 +7719,7 @@ static int find_energy_efficient_cpu(struct sched_domain *sd,
 	int use_fbt = sched_feat(FIND_BEST_TARGET);
 	int cpu_iter, eas_cpu_idx = EAS_CPU_NXT;
 	int energy_cpu = prev_cpu, delta = 0;
+	int target_cpu = -1;
 	struct energy_env *eenv;
 	struct cpumask *rtg_target = find_rtg_target(p);
 	struct find_best_target_env fbt_env;
@@ -7749,9 +7796,18 @@ static int find_energy_efficient_cpu(struct sched_domain *sd,
 		fbt_env.need_idle = need_idle;
 
 		/* Find a cpu with sufficient capacity */
-		eenv->cpu[EAS_CPU_NXT].cpu_id = find_best_target(p,
-				&eenv->cpu[EAS_CPU_BKP].cpu_id,
+		target_cpu = find_best_target(p, &eenv->cpu[EAS_CPU_BKP].cpu_id,
 					      boosted, prefer_idle, &fbt_env);
 
+		/* Immediately return a found idle CPU for a prefer_idle task */
+		if (prefer_idle && target_cpu >= 0 && idle_cpu(target_cpu)) {
+			energy_cpu = target_cpu;
+			goto out;
+		}
+
+		/* Place target into NEXT slot */
+		eenv->cpu[EAS_CPU_NXT].cpu_id = target_cpu;
+
 		next_cpu = eenv->cpu[EAS_CPU_NXT].cpu_id;
 		backup_cpu = eenv->cpu[EAS_CPU_BKP].cpu_id;
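
One detail of the new fast path worth flagging (editor's reading): the idle_cpu(target_cpu) re-check distinguishes a genuinely idle pick from the active-CPU fallbacks find_best_target() may return for prefer_idle tasks, and it also catches a CPU that has stopped being idle in the meantime:

	/* Sketch: the three conditions guarding the bypass. */
	if (prefer_idle &&		/* task wants an idle CPU       */
	    target_cpu >= 0 &&		/* a target CPU was found ...   */
	    idle_cpu(target_cpu))	/* ... and it is still idle now */
		/* place the task there; skip the energy evaluation */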