Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3b95a25b authored by qctecmdr; committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched/fair: Allow prev cpu in find best target"

parents fc9678a4 74ccfe03
Loading
Loading
Loading
Loading
+27 −32
Original line number Diff line number Diff line
@@ -3851,36 +3851,46 @@ struct find_best_target_env {
	int fastpath;
};

static bool is_packing_eligible(struct task_struct *p, int target_cpu,
static inline void adjust_cpus_for_packing(struct task_struct *p,
			int *target_cpu, int *best_idle_cpu,
			int target_cpus_count, int shallowest_idle_cstate,
			struct find_best_target_env *fbt_env,
				unsigned int target_cpus_count,
				int best_idle_cstate, bool boosted)
			bool boosted)
{
	unsigned long tutil, estimated_capacity;

	if (task_placement_boost_enabled(p) || fbt_env->need_idle || boosted)
		return false;
	if (*best_idle_cpu == -1 || *target_cpu == -1)
		return;

	if (best_idle_cstate == -1)
		return false;
	if (task_placement_boost_enabled(p) || fbt_env->need_idle || boosted ||
			shallowest_idle_cstate == -1) {
		*target_cpu = -1;
		return;
	}

	if (target_cpus_count != 1)
		return true;
	if (target_cpus_count > 1)
		return;

	if (task_in_cum_window_demand(cpu_rq(target_cpu), p))
	if (task_in_cum_window_demand(cpu_rq(*target_cpu), p))
		tutil = 0;
	else
		tutil = task_util(p);

	estimated_capacity = cpu_util_cum(target_cpu, tutil);
	estimated_capacity = cpu_util_cum(*target_cpu, tutil);
	estimated_capacity = add_capacity_margin(estimated_capacity,
							target_cpu);
							*target_cpu);

	/*
	 * If there is only one active CPU and it is already above its current
	 * capacity, avoid placing additional task on the CPU.
	 */
	return (estimated_capacity <= capacity_curr_of(target_cpu));
	if (estimated_capacity > capacity_curr_of(*target_cpu)) {
		*target_cpu = -1;
		return;
	}

	if (fbt_env->rtg_target)
		*best_idle_cpu = -1;
}

static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
@@ -7069,11 +7079,9 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,

	} while (sg = sg->next, sg != start_sd->groups);

	if (best_idle_cpu != -1 && !is_packing_eligible(p, target_cpu, fbt_env,
			active_cpus_count, shallowest_idle_cstate, boosted)) {
		target_cpu = best_idle_cpu;
		best_idle_cpu = -1;
	}
	adjust_cpus_for_packing(p, &target_cpu, &best_idle_cpu,
				active_cpus_count, shallowest_idle_cstate,
				fbt_env, boosted);

	/*
	 * For non latency sensitive tasks, cases B and C in the previous loop,
@@ -7115,19 +7123,6 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
		!(p->state == TASK_RUNNING && !idle_cpu(most_spare_cap_cpu)))
		target_cpu = most_spare_cap_cpu;

	/*
	 * The next step of energy evaluation includes
	 * prev_cpu. Drop target or backup if it is
	 * same as prev_cpu
	 */
	if (backup_cpu == prev_cpu)
		backup_cpu = -1;

	if (target_cpu == prev_cpu) {
		target_cpu = backup_cpu;
		backup_cpu = -1;
	}

	if (target_cpu == -1 && isolated_candidate != -1 &&
					cpu_isolated(prev_cpu))
		target_cpu = isolated_candidate;