kernel/sched/fair.c  (+45 −2)

@@ -169,6 +169,7 @@ unsigned int sysctl_sched_capacity_margin_down = 1205; /* ~15% margin */
 #ifdef CONFIG_SCHED_WALT
 unsigned int sysctl_sched_min_task_util_for_boost_colocation;
 #endif
+static unsigned int __maybe_unused sched_small_task_threshold = 102;
 
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
@@ -9870,6 +9871,18 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 		capacity = capacity_of(i);
 
+		/*
+		 * For ASYM_CPUCAPACITY domains, don't pick a cpu that could
+		 * eventually lead to active_balancing high->low capacity.
+		 * Higher per-cpu capacity is considered better than balancing
+		 * average load.
+		 */
+		if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
+		    capacity_of(env->dst_cpu) < capacity &&
+		    (rq->nr_running == 1 || (rq->nr_running == 2 &&
+		     task_util(rq->curr) < sched_small_task_threshold)))
+			continue;
+
 		wl = weighted_cpuload(i);
 
 		/*
@@ -10344,6 +10357,27 @@ update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
 	*next_balance = next;
 }
 
+#ifdef CONFIG_SCHED_WALT
+static inline bool min_cap_cluster_has_misfit_task(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		if (!is_min_capacity_cpu(cpu))
+			break;
+		if (cpu_rq(cpu)->walt_stats.nr_big_tasks)
+			return true;
+	}
+
+	return false;
+}
+#else
+static inline bool min_cap_cluster_has_misfit_task(void)
+{
+	return false;
+}
+#endif
+
 /*
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
@@ -10355,17 +10389,25 @@ static int idle_balance(struct rq *this_rq)
 	struct sched_domain *sd;
 	int pulled_task = 0;
 	u64 curr_cost = 0;
+	bool force_lb = false;
 
 	if (cpu_isolated(this_cpu))
 		return 0;
 
+	/*
+	 * Force higher capacity CPUs doing load balance, when the lower
+	 * capacity CPUs has some misfit tasks.
+	 */
+	if (!is_min_capacity_cpu(this_cpu) && min_cap_cluster_has_misfit_task())
+		force_lb = true;
+
 	/*
 	 * We must set idle_stamp _before_ calling idle_balance(), such that we
 	 * measure the duration of idle_balance() as idle time.
 	 */
 	this_rq->idle_stamp = rq_clock(this_rq);
 
-	if (!energy_aware() &&
+	if (!energy_aware() && !force_lb &&
 	    (this_rq->avg_idle < sysctl_sched_migration_cost ||
 	     !this_rq->rd->overload)) {
 		rcu_read_lock();
@@ -10388,7 +10430,8 @@ static int idle_balance(struct rq *this_rq)
 		if (!(sd->flags & SD_LOAD_BALANCE))
 			continue;
 
-		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
+		if (!force_lb &&
+		    this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
 			update_next_balance(sd, &next_balance);
 			break;
 		}