kernel/sched/fair.c (+16 −1):

@@ -10425,9 +10425,24 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 	 * a think about bumping its value to force at least one task to be
 	 * moved
 	 */
-	if (env->imbalance < busiest->load_per_task)
+	if (env->imbalance < busiest->load_per_task) {
+		/*
+		 * The busiest group is overloaded so it could use help
+		 * from the other groups. If the local group has idle CPUs
+		 * and it is not overloaded and has no imbalance within
+		 * the group, allow the load balance by bumping the
+		 * imbalance.
+		 */
+		if (busiest->group_type == group_overloaded &&
+		    local->group_type <= group_misfit_task &&
+		    env->idle != CPU_NOT_IDLE) {
+			env->imbalance = busiest->load_per_task;
+			return;
+		}
+
 		return fix_small_imbalance(env, sds);
+	}
 }

 /******* find_busiest_group() helpers end here *********************/