Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 118408e5 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched/fair: Improve the scheduler"

parents a0d9af0d b4e3031d
Loading
Loading
Loading
Loading
+36 −4
Original line number Diff line number Diff line
@@ -203,6 +203,7 @@ unsigned int sysctl_sched_min_task_util_for_boost = 51;
/* 0.68ms default for 20ms window size scaled to 1024 */
unsigned int sysctl_sched_min_task_util_for_colocation = 35;
#endif
static unsigned int __maybe_unused sched_small_task_threshold = 102;

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
@@ -10709,7 +10710,8 @@ static struct rq *find_busiest_queue(struct lb_env *env,
		 */
		if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
		    capacity_of(env->dst_cpu) < capacity &&
		    rq->nr_running == 1)
		    (rq->nr_running == 1 || (rq->nr_running == 2 &&
		     task_util(rq->curr) < sched_small_task_threshold)))
			continue;

		wl = weighted_cpuload(rq);
@@ -11203,6 +11205,27 @@ update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
		*next_balance = next;
}

#ifdef CONFIG_SCHED_WALT
/*
 * Report whether any CPU in the minimum-capacity cluster currently has a
 * "big" (misfit) task per WALT accounting.
 *
 * NOTE(review): the scan assumes min-capacity CPUs are numbered first, so
 * it bails out at the first CPU outside that cluster — confirm this holds
 * on all supported topologies.
 */
static inline bool min_cap_cluster_has_misfit_task(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (!is_min_capacity_cpu(cpu))
			return false;
		if (cpu_rq(cpu)->walt_stats.nr_big_tasks)
			return true;
	}

	return false;
}
#else
/* Without WALT there is no big-task accounting, so never report misfits. */
static inline bool min_cap_cluster_has_misfit_task(void)
{
	return false;
}
#endif

/*
 * idle_balance is called by schedule() if this_cpu is about to become
 * idle. Attempts to pull tasks from other CPUs.
@@ -11214,6 +11237,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
	struct sched_domain *sd;
	int pulled_task = 0;
	u64 curr_cost = 0;
	bool force_lb = false;

	if (cpu_isolated(this_cpu))
		return 0;
@@ -11230,6 +11254,13 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
	if (!cpu_active(this_cpu))
		return 0;

	/*
	 * Force higher-capacity CPUs to perform load balancing when the
	 * lower-capacity CPUs have misfit tasks.
	 */
	if (!is_min_capacity_cpu(this_cpu) && min_cap_cluster_has_misfit_task())
		force_lb = true;

	/*
	 * This is OK, because current is on_cpu, which avoids it being picked
	 * for load-balance and preemption/IRQs are still disabled avoiding
@@ -11238,8 +11269,8 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
	 */
	rq_unpin_lock(this_rq, rf);

	if (this_rq->avg_idle < sysctl_sched_migration_cost ||
	    !READ_ONCE(this_rq->rd->overload)) {
	if (!force_lb && (this_rq->avg_idle < sysctl_sched_migration_cost ||
	    !READ_ONCE(this_rq->rd->overload))) {
		rcu_read_lock();
		sd = rcu_dereference_check_sched_domain(this_rq->sd);
		if (sd)
@@ -11264,7 +11295,8 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
			continue;
		}

		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
		if (!force_lb &&
		    this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
			update_next_balance(sd, &next_balance);
			break;
		}