kernel/sched/fair.c (+21 −2)

@@ -11768,6 +11768,21 @@ static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle
 static inline void nohz_newidle_balance(struct rq *this_rq) { }
 #endif /* CONFIG_NO_HZ_COMMON */
 
+static bool silver_has_big_tasks(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		if (!is_min_capacity_cpu(cpu))
+			break;
+
+		if (walt_big_tasks(cpu))
+			return true;
+	}
+
+	return false;
+}
+
 /*
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
@@ -11781,6 +11796,10 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 	u64 curr_cost = 0;
 	u64 avg_idle = this_rq->avg_idle;
 	bool prefer_spread = prefer_spread_on_idle(this_cpu);
+	bool force_lb = (!is_min_capacity_cpu(this_cpu) &&
+			 silver_has_big_tasks() &&
+			 (atomic_read(&this_rq->nr_iowait) == 0));
+
 	if (cpu_isolated(this_cpu))
 		return 0;
 
@@ -11797,7 +11816,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 	if (!cpu_active(this_cpu))
 		return 0;
 
-	if (prefer_spread)
+	if (force_lb || prefer_spread)
 		avg_idle = ULLONG_MAX;
 	/*
 	 * This is OK, because current is on_cpu, which avoids it being picked
@@ -11832,7 +11851,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 		if (!(sd->flags & SD_LOAD_BALANCE))
 			continue;
 
-		if (prefer_spread &&
+		if (prefer_spread && !force_lb &&
 		    (sd->flags & SD_ASYM_CPUCAPACITY) &&
 		    !is_asym_cap_cpu(this_cpu))
 			avg_idle = this_rq->avg_idle;
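Taken together, the change makes a newly idle big (non-min-capacity) CPU balance much more aggressively when WALT reports a big task stranded on a silver core: force_lb lifts avg_idle to ULLONG_MAX so the newidle balance is not cut short, and in the domain loop it suppresses the prefer_spread carve-out so avg_idle stays at ULLONG_MAX even across asymmetric-capacity domains, letting the pull cross clusters. The nr_iowait check skips the forced pull while this rq still has tasks blocked on I/O. Note that silver_has_big_tasks() stops at the first non-min-capacity CPU, which assumes little cores are enumerated before big ones, as is typical on these SoCs.

What follows is a minimal user-space sketch of that gating logic, not kernel code: NR_CPUS, struct cpu_info, and the cpus[] table are hypothetical stand-ins for the kernel's topology and WALT state (is_min_capacity_cpu(), walt_big_tasks(), rq->nr_iowait).

/* Sketch only: models the force_lb gate from the patch above.
 * The cpu_info table is a stand-in for kernel/WALT state. */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

struct cpu_info {
	bool min_capacity;  /* stands in for is_min_capacity_cpu() */
	bool has_big_task;  /* stands in for walt_big_tasks()      */
	int  nr_iowait;     /* stands in for rq->nr_iowait         */
};

/* CPUs 0-3 silver, 4-7 gold; a big task is stuck on CPU 1. */
static struct cpu_info cpus[NR_CPUS] = {
	[0] = { .min_capacity = true },
	[1] = { .min_capacity = true, .has_big_task = true },
	[2] = { .min_capacity = true },
	[3] = { .min_capacity = true },
	/* [4]..[7] zero-init as gold cores with no big tasks */
};

/* Mirrors silver_has_big_tasks(): silver cores come first in the
 * enumeration, so the scan stops at the first non-silver CPU. */
static bool silver_has_big_tasks(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpus[cpu].min_capacity)
			break;
		if (cpus[cpu].has_big_task)
			return true;
	}
	return false;
}

/* Mirrors the force_lb condition: a newly idle gold CPU forces the
 * balance only when a silver core holds a big task and this rq has
 * no tasks waiting on I/O. */
static bool force_newidle_balance(int this_cpu)
{
	return !cpus[this_cpu].min_capacity &&
	       silver_has_big_tasks() &&
	       cpus[this_cpu].nr_iowait == 0;
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: force_lb=%d\n", cpu,
		       force_newidle_balance(cpu));
	return 0;
}

Running the sketch prints force_lb=1 only for the gold CPUs 4-7, matching the patch's intent: silver cores never force the pull, and gold cores do so only while CPU 1's big task remains misplaced.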