Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b4e3031d authored by Pavankumar Kondeti
Browse files

sched/fair: Improve the scheduler



This change forces a newly-idle (idle_balance) load balance on higher-capacity CPUs whenever the minimum-capacity cluster has misfit (big) tasks, bypassing the avg_idle and root-domain overload early-bailout heuristics so big tasks can be pulled up promptly.

Change-Id: Ice00d43c70be105b5bc3ad09caa6047aa98d7402
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent d89e0499
Loading
Loading
Loading
Loading
+33 −3
Original line number Diff line number Diff line
@@ -11205,6 +11205,27 @@ update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
		*next_balance = next;
}

#ifdef CONFIG_SCHED_WALT
/*
 * min_cap_cluster_has_misfit_task - report whether any CPU in the
 * minimum-capacity cluster has "big" (misfit) tasks enqueued, per the
 * WALT (Window Assisted Load Tracking) per-rq accounting.
 *
 * NOTE(review): the loop breaks at the first non-min-capacity CPU, so it
 * assumes all min-capacity CPUs are numbered before higher-capacity ones
 * in the possible-CPU mask — confirm this holds for every target SoC.
 */
static inline bool min_cap_cluster_has_misfit_task(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		/* Past the min-capacity cluster: nothing more to scan. */
		if (!is_min_capacity_cpu(cpu))
			break;
		/* WALT counts tasks too big for this CPU's capacity. */
		if (cpu_rq(cpu)->walt_stats.nr_big_tasks)
			return true;
	}

	return false;
}
#else
/* Without WALT there is no big-task accounting; report no misfit tasks. */
static inline bool min_cap_cluster_has_misfit_task(void)
{
	return false;
}
#endif

/*
 * idle_balance is called by schedule() if this_cpu is about to become
 * idle. Attempts to pull tasks from other CPUs.
@@ -11216,6 +11237,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
	struct sched_domain *sd;
	int pulled_task = 0;
	u64 curr_cost = 0;
	bool force_lb = false;

	if (cpu_isolated(this_cpu))
		return 0;
@@ -11232,6 +11254,13 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
	if (!cpu_active(this_cpu))
		return 0;

	/*
	 * Force the higher capacity CPUs to do load balance when the
	 * lower capacity CPUs have some misfit tasks.
	 */
	if (!is_min_capacity_cpu(this_cpu) && min_cap_cluster_has_misfit_task())
		force_lb = true;

	/*
	 * This is OK, because current is on_cpu, which avoids it being picked
	 * for load-balance and preemption/IRQs are still disabled avoiding
@@ -11240,8 +11269,8 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
	 */
	rq_unpin_lock(this_rq, rf);

	if (this_rq->avg_idle < sysctl_sched_migration_cost ||
	    !READ_ONCE(this_rq->rd->overload)) {
	if (!force_lb && (this_rq->avg_idle < sysctl_sched_migration_cost ||
	    !READ_ONCE(this_rq->rd->overload))) {
		rcu_read_lock();
		sd = rcu_dereference_check_sched_domain(this_rq->sd);
		if (sd)
@@ -11266,7 +11295,8 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
			continue;
		}

		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
		if (!force_lb &&
		    this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
			update_next_balance(sd, &next_balance);
			break;
		}