kernel/sched/fair.c  +7 −5

@@ -4005,7 +4005,7 @@ static inline void adjust_cpus_for_packing(struct task_struct *p,
 	if (*best_idle_cpu == -1 || *target_cpu == -1)
 		return;
 
-	if (prefer_spread_on_idle(*best_idle_cpu))
+	if (prefer_spread_on_idle(*best_idle_cpu, false))
 		fbt_env->need_idle |= 2;
 
 	if (task_rtg_high_prio(p) && walt_nr_rtg_high_prio(*target_cpu) > 0) {
@@ -10676,7 +10676,8 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	};
 
 	env.prefer_spread = (idle != CPU_NOT_IDLE &&
-			     prefer_spread_on_idle(this_cpu) &&
+			     prefer_spread_on_idle(this_cpu,
+						   idle == CPU_NEWLY_IDLE) &&
 			     !((sd->flags & SD_ASYM_CPUCAPACITY) &&
 			       !is_asym_cap_cpu(this_cpu)));
 
@@ -11202,7 +11203,8 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
 		}
 		max_cost += sd->max_newidle_lb_cost;
 
-		if (!sd_overutilized(sd) && !prefer_spread_on_idle(cpu))
+		if (!sd_overutilized(sd) &&
+		    !prefer_spread_on_idle(cpu, idle == CPU_NEWLY_IDLE))
 			continue;
 
 		if (!(sd->flags & SD_LOAD_BALANCE))
@@ -11456,7 +11458,7 @@ static void nohz_balancer_kick(struct rq *rq)
 	 */
 	if (static_branch_likely(&sched_energy_present)) {
 		if (rq->nr_running >= 2 && (cpu_overutilized(cpu) ||
-					    prefer_spread_on_idle(cpu)))
+					    prefer_spread_on_idle(cpu, false)))
 			flags = NOHZ_KICK_MASK;
 		goto out;
 	}
@@ -11827,7 +11829,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 	int pulled_task = 0;
 	u64 curr_cost = 0;
 	u64 avg_idle = this_rq->avg_idle;
-	bool prefer_spread = prefer_spread_on_idle(this_cpu);
+	bool prefer_spread = prefer_spread_on_idle(this_cpu, true);
 	bool force_lb = (!is_min_capacity_cpu(this_cpu) &&
 			 silver_has_big_tasks() &&
 			 (atomic_read(&this_rq->nr_iowait) == 0));
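Taken together, these fair.c call sites now tell prefer_spread_on_idle() whether the caller sits on a newly-idle balance path: adjust_cpus_for_packing() and nohz_balancer_kick() pass false, since neither performs a newly-idle balance; load_balance() and rebalance_domains() derive the flag from idle == CPU_NEWLY_IDLE; and idle_balance() passes true, being the newly-idle path by definition. A sketch of the resulting decision table follows the walt.h hunk below.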
kernel/sched/walt.h  +12 −7

@@ -451,15 +451,20 @@ static int in_sched_bug;
 	} \
 })
 
-static inline bool prefer_spread_on_idle(int cpu)
+static inline bool prefer_spread_on_idle(int cpu, bool new_ilb)
 {
-	if (likely(!sysctl_sched_prefer_spread))
-		return false;
-
-	if (is_min_capacity_cpu(cpu))
-		return sysctl_sched_prefer_spread >= 1;
-
-	return sysctl_sched_prefer_spread > 1;
+	switch (sysctl_sched_prefer_spread) {
+	case 1:
+		return is_min_capacity_cpu(cpu);
+	case 2:
+		return true;
+	case 3:
+		return (new_ilb && is_min_capacity_cpu(cpu));
+	case 4:
+		return new_ilb;
+	default:
+		return false;
+	}
 }
 
 #else /* CONFIG_SCHED_WALT */
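The rewritten helper keeps the old encodings (0 = never spread, 1 = spread on the min-capacity cluster, 2 = spread everywhere) and adds 3 and 4, which behave like 1 and 2 but only when the caller is on the newly-idle balance path. The following is a minimal userspace sketch of that decision table, not kernel code: is_min_capacity_cpu() is stubbed on the assumption that CPUs 0-3 form the little cluster, as on a typical 4+4 part.

/* Userspace mirror of the new prefer_spread_on_idle() decision table. */
#include <stdbool.h>
#include <stdio.h>

static int sysctl_sched_prefer_spread;

/* Stub: assume the little (min-capacity) cluster is CPUs 0-3. */
static bool is_min_capacity_cpu(int cpu)
{
	return cpu < 4;
}

static bool prefer_spread_on_idle(int cpu, bool new_ilb)
{
	switch (sysctl_sched_prefer_spread) {
	case 1:	/* spread on little CPUs, any idle-balance context */
		return is_min_capacity_cpu(cpu);
	case 2:	/* spread on all CPUs, any idle-balance context */
		return true;
	case 3:	/* spread on little CPUs, newly-idle balance only */
		return new_ilb && is_min_capacity_cpu(cpu);
	case 4:	/* spread on all CPUs, newly-idle balance only */
		return new_ilb;
	default: /* 0: packing preferred, never spread */
		return false;
	}
}

int main(void)
{
	printf("val  little,new_ilb  little,other  big,new_ilb  big,other\n");
	for (int v = 0; v <= 4; v++) {
		sysctl_sched_prefer_spread = v;
		printf("%3d  %14d  %12d  %11d  %9d\n", v,
		       prefer_spread_on_idle(0, true),
		       prefer_spread_on_idle(0, false),
		       prefer_spread_on_idle(4, true),
		       prefer_spread_on_idle(4, false));
	}
	return 0;
}

Running the sketch shows that values 3 and 4 collapse to "never spread" whenever new_ilb is false, which is exactly why the nohz kick and task-placement call sites above are unaffected by the new modes.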
kernel/sysctl.c  +1 −1

@@ -568,7 +568,7 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= &zero,
-		.extra2		= &two,
+		.extra2		= &four,
 	},
 	{
 		.procname	= "walt_rtg_cfs_boost_prio",
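Raising .extra2 from &two to &four widens the tunable's accepted range from [0, 2] to [0, 4], making the two new newly-idle-only modes reachable from userspace. A hedged usage sketch follows; it assumes the entry's .procname is sched_prefer_spread (the hunk context above shows only the handler fields, so the exact proc path is an assumption matching the sysctl_sched_prefer_spread variable in walt.h).

/* Select mode 3 (spread little CPUs only on newly-idle balance).
 * Assumes the sysctl is exposed at /proc/sys/kernel/sched_prefer_spread. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/sys/kernel/sched_prefer_spread", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "3", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}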