kernel/sched/fair.c (+34 −15)

@@ -10389,6 +10389,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 {
 	unsigned long max_pull, load_above_capacity = ~0UL;
 	struct sg_lb_stats *local, *busiest;
+	bool no_imbalance = false;
 
 	local = &sds->local_stat;
 	busiest = &sds->busiest_stat;
@@ -10408,9 +10409,11 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 	 * factors in sg capacity and sgs with smaller group_type are
 	 * skipped when updating the busiest sg:
 	 */
-	if (busiest->group_type != group_misfit_task &&
-	    (busiest->avg_load <= sds->avg_load ||
-	     local->avg_load >= sds->avg_load)) {
+	if (busiest->avg_load <= sds->avg_load ||
+	    local->avg_load >= sds->avg_load)
+		no_imbalance = true;
+
+	if (busiest->group_type != group_misfit_task && no_imbalance) {
 		env->imbalance = 0;
 		if (busiest->group_type == group_overloaded &&
 		    local->group_type <= group_misfit_task) {
@@ -10435,19 +10438,35 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 	}
 
-	/*
-	 * We're trying to get all the cpus to the average_load, so we don't
-	 * want to push ourselves above the average load, nor do we wish to
-	 * reduce the max loaded cpu below the average load. At the same time,
-	 * we also don't want to reduce the group load below the group
-	 * capacity. Thus we look for the minimum possible imbalance.
-	 */
-	max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
-
-	/* How much load to actually move to equalise the imbalance */
-	env->imbalance = min(
-		max_pull * busiest->group_capacity,
-		(sds->avg_load - local->avg_load) * local->group_capacity
-	) / SCHED_CAPACITY_SCALE;
+	/*
+	 * In case of a misfit task, independent of avg loads we do load balance
+	 * at the parent sched domain level for B.L systems, so it is possible
+	 * that busiest group avg load can be less than sd avg load.
+	 * So skip calculating load based imbalance between groups.
+	 */
+	if (!no_imbalance) {
+		/*
+		 * We're trying to get all the cpus to the average_load,
+		 * so we don't want to push ourselves above the average load,
+		 * nor do we wish to reduce the max loaded cpu below the average
+		 * load. At the same time, we also don't want to reduce the
+		 * group load below the group capacity.
+		 * Thus we look for the minimum possible imbalance.
+		 */
+		max_pull = min(busiest->avg_load - sds->avg_load,
+			       load_above_capacity);
+
+		/* How much load to actually move to equalise the imbalance */
+		env->imbalance = min(max_pull * busiest->group_capacity,
+				     (sds->avg_load - local->avg_load) *
+				     local->group_capacity) / SCHED_CAPACITY_SCALE;
+	} else {
+		/*
+		 * Skipped load based imbalance calculations, but let's find
+		 * imbalance based on busiest group type or fix small imbalance.
+		 */
+		env->imbalance = 0;
+	}
 
 	/* Boost imbalance to allow misfit task to be balanced.
 	 * Always do this if we are doing a NEWLY_IDLE balance
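To make the load-based branch easier to sanity-check, here is a minimal standalone sketch of the same arithmetic, including the new no_imbalance early-out. The flattened variables, the min_ul() helper, and all the numbers are illustrative stand-ins for the kernel's sg_lb_stats/sd_lb_stats fields, not values from a real trace.

/*
 * Userspace sketch of the load-based imbalance math in the patch.
 * All stats are invented; SCHED_CAPACITY_SCALE matches the kernel's
 * 1024 fixed-point capacity scale.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Hypothetical per-group stats (avg_load is capacity-scaled). */
	unsigned long busiest_avg_load = 900;	/* busiest group */
	unsigned long local_avg_load   = 300;	/* local (pulling) group */
	unsigned long sd_avg_load      = 600;	/* sched-domain average */
	unsigned long busiest_capacity = 2048;	/* e.g. two big CPUs */
	unsigned long local_capacity   = 1024;	/* e.g. one little CPU */
	unsigned long load_above_capacity = ~0UL;
	unsigned long max_pull, imbalance;

	/*
	 * Mirrors the no_imbalance condition: the load-based path is
	 * only meaningful when busiest sits above the domain average
	 * and local sits below it.
	 */
	if (busiest_avg_load <= sd_avg_load || local_avg_load >= sd_avg_load) {
		puts("no_imbalance: load-based calculation skipped");
		return 0;
	}

	max_pull = min_ul(busiest_avg_load - sd_avg_load, load_above_capacity);

	/* How much load to actually move to equalise the imbalance. */
	imbalance = min_ul(max_pull * busiest_capacity,
			   (sd_avg_load - local_avg_load) * local_capacity)
		    / SCHED_CAPACITY_SCALE;

	/* (900-600)*2048 = 614400 vs (600-300)*1024 = 307200 -> 300 */
	printf("imbalance = %lu\n", imbalance);
	return 0;
}

With these numbers the second term wins, so the move is capped at 300 units of scaled load: taking the min of the two capacity-weighted terms is what keeps the pull from dragging the busiest group below the domain average or pushing the local group above it, which is the invariant the original comment describes.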