kernel/sched/fair.c (+20 −3)

```diff
@@ -11549,6 +11549,20 @@ static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle
 static inline void nohz_newidle_balance(struct rq *this_rq) { }
 #endif /* CONFIG_NO_HZ_COMMON */
 
+static bool silver_has_big_tasks(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		if (!is_min_capacity_cpu(cpu))
+			break;
+		if (cpu_rq(cpu)->walt_stats.nr_big_tasks)
+			return true;
+	}
+
+	return false;
+}
+
 /*
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
@@ -11560,6 +11574,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 	struct sched_domain *sd;
 	int pulled_task = 0;
 	u64 curr_cost = 0;
+	u64 avg_idle = this_rq->avg_idle;
 
 	if (cpu_isolated(this_cpu))
 		return 0;
@@ -11575,7 +11590,9 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 	 */
 	if (!cpu_active(this_cpu))
 		return 0;
-
+	if (!is_min_capacity_cpu(this_cpu) && silver_has_big_tasks() &&
+	    (atomic_read(&this_rq->nr_iowait) == 0))
+		avg_idle = ULLONG_MAX;
 	/*
 	 * This is OK, because current is on_cpu, which avoids it being picked
 	 * for load-balance and preemption/IRQs are still disabled avoiding
@@ -11584,7 +11601,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 	 */
 	rq_unpin_lock(this_rq, rf);
 
-	if (this_rq->avg_idle < sysctl_sched_migration_cost ||
+	if (avg_idle < sysctl_sched_migration_cost ||
 	    !READ_ONCE(this_rq->rd->overload)) {
 
 		rcu_read_lock();
@@ -11609,7 +11626,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 		if (!(sd->flags & SD_LOAD_BALANCE))
 			continue;
 
-		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
+		if (avg_idle < curr_cost + sd->max_newidle_lb_cost) {
 			update_next_balance(sd, &next_balance);
 			break;
 		}
```
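In short: when a non-minimum-capacity (gold) CPU enters newidle balance, any silver CPU is running big tasks per WALT's `walt_stats.nr_big_tasks`, and the newly idle CPU has no iowait pending, the patch overrides the local `avg_idle` copy with `ULLONG_MAX`. That defeats both `avg_idle` early-abort checks in `idle_balance()`, so the gold CPU always attempts to pull the big tasks up from the silver cluster. Note that `silver_has_big_tasks()` breaks out at the first non-minimum-capacity CPU, so it implicitly relies on min-capacity CPUs being enumerated first by `for_each_possible_cpu()`.

Below is a minimal userspace model of that heuristic, not kernel code: it only illustrates how forcing `avg_idle` to the maximum value defeats the two cutoffs. The threshold values and flag names are illustrative assumptions, not taken from the patch.

```c
/*
 * Userspace sketch of the patched idle_balance() gating. The kernel
 * helpers are modeled as plain booleans; the cost values are made up.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rq_avg_idle = 100000;           /* ns; example rq->avg_idle */
	uint64_t migration_cost = 500000;        /* sysctl_sched_migration_cost */
	uint64_t curr_cost = 0;
	uint64_t max_newidle_lb_cost = 200000;   /* sd->max_newidle_lb_cost */

	bool gold_cpu = true;                    /* !is_min_capacity_cpu(this_cpu) */
	bool silver_busy_with_big = true;        /* silver_has_big_tasks() */
	bool iowait_pending = false;             /* atomic_read(&rq->nr_iowait) != 0 */

	uint64_t avg_idle = rq_avg_idle;
	if (gold_cpu && silver_busy_with_big && !iowait_pending)
		avg_idle = UINT64_MAX;           /* force the pull attempt */

	/* First cutoff: would normally skip newidle balance entirely. */
	printf("skip balance: %d\n", avg_idle < migration_cost);

	/* Per-domain cutoff: would normally stop walking sched domains. */
	printf("stop at domain: %d\n",
	       avg_idle < curr_cost + max_newidle_lb_cost);
	return 0;
}
```

With the override in place both comparisons print 0, i.e. neither early abort fires, which matches the diff replacing `this_rq->avg_idle` with the local `avg_idle` at exactly those two checks.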