kernel/sched/fair.c  +7 −5

@@ -6161,15 +6161,15 @@ static inline bool task_fits_spare(struct task_struct *p, int cpu)
 	return __task_fits(p, cpu, cpu_util(cpu));
 }
 
-static bool __cpu_overutilized(int cpu, int delta)
+bool __cpu_overutilized(int cpu, unsigned long util)
 {
-	return (capacity_orig_of(cpu) * 1024) <
-	       ((cpu_util(cpu) + delta) * sysctl_sched_capacity_margin);
+	return (capacity_orig_of(cpu) * 1024 <
+		util * sysctl_sched_capacity_margin);
 }
 
 bool cpu_overutilized(int cpu)
 {
-	return __cpu_overutilized(cpu, 0);
+	return __cpu_overutilized(cpu, cpu_util(cpu));
 }
 
 #ifdef CONFIG_SCHED_TUNE

@@ -7188,7 +7188,9 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
 	task_util_boosted = 0;
 #endif
 
 	/* Not enough spare capacity on previous cpu */
-	if (__cpu_overutilized(task_cpu(p), task_util_boosted)) {
+	if (__cpu_overutilized(task_cpu(p),
+			       cpu_util(task_cpu(p)) +
+			       task_util_boosted)) {
 		trace_sched_task_util_overutilzed(p, task_cpu(p), task_util(p),
 					target_cpu, target_cpu, 0, need_idle);
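The reworked helper now takes the utilization to test directly instead of a delta on top of cpu_util(): a CPU counts as overutilized once util * sysctl_sched_capacity_margin exceeds capacity_orig_of(cpu) * 1024, i.e. once utilization crosses capacity * 1024 / margin. Below is a minimal user-space sketch of that arithmetic only; CAPACITY_ORIG and CAPACITY_MARGIN are hypothetical stand-ins for capacity_orig_of() and the sysctl (1280 is an example value that puts the threshold at ~80% of capacity; the kernel's actual default may differ):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical constants; the kernel reads these from capacity_orig_of()
 * and sysctl_sched_capacity_margin. */
#define CAPACITY_ORIG	1024UL
#define CAPACITY_MARGIN	1280UL	/* threshold = 1024 * 1024 / 1280 = 819 */

/* Same comparison as the reworked __cpu_overutilized(). */
static bool overutilized(unsigned long util)
{
	return CAPACITY_ORIG * 1024 < util * CAPACITY_MARGIN;
}

int main(void)
{
	printf("util=800: %d\n", overutilized(800));	/* 0: under the margin */
	printf("util=820: %d\n", overutilized(820));	/* 1: 820 * 1280 > 1024 * 1024 */
	return 0;
}

Passing the utilization in explicitly is what lets callers test a hypothetical load, as energy_aware_wake_cpu() does above with cpu_util(task_cpu(p)) + task_util_boosted.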
kernel/sched/rt.c  +7 −42

@@ -1724,11 +1724,7 @@ static int find_lowest_rq(struct task_struct *task)
 	unsigned long cpu_capacity;
 	unsigned long best_capacity;
 	unsigned long util, best_cpu_util = ULONG_MAX;
-	int best_cpu_idle_idx = INT_MAX;
-	int cpu_idle_idx = -1;
-	long new_util_cum;
-	int max_spare_cap_cpu = -1;
-	long max_spare_cap = -LONG_MAX;
+	unsigned long tutil = task_util(task);
 	bool placement_boost;
 
 	/* Make sure the mask is initialized first */

@@ -1791,55 +1787,24 @@
 			 * double count rt task load.
 			 */
 			util = cpu_util(cpu);
-			if (!cpu_overutilized(cpu)) {
+			if (!__cpu_overutilized(cpu, util + tutil)) {
 				if (cpu_isolated(cpu))
 					continue;
 
 				if (sched_cpu_high_irqload(cpu))
 					continue;
 
-				new_util_cum = cpu_util_cum(cpu, 0);
-
-				if (!task_in_cum_window_demand(cpu_rq(cpu),
-							       task))
-					new_util_cum += task_util(task);
-
-				trace_sched_cpu_util(task, cpu,
-						     task_util(task), 0,
-						     new_util_cum, 0);
-
-				if (sysctl_sched_cstate_aware)
-					cpu_idle_idx =
-					    idle_get_state_idx(cpu_rq(cpu));
-
-				if (add_capacity_margin(new_util_cum, cpu) <
-				    capacity_curr_of(cpu)) {
-					if (cpu_idle_idx < best_cpu_idle_idx ||
-					    (best_cpu != task_cpu(task) &&
-					     (best_cpu_idle_idx == cpu_idle_idx &&
-					      best_cpu_util > util))) {
-						best_cpu_util = util;
-						best_cpu = cpu;
-						best_cpu_idle_idx = cpu_idle_idx;
-					}
-				} else {
-					long spare_cap = capacity_of(cpu) -
-							 util;
-
-					if (spare_cap > 0 &&
-					    max_spare_cap < spare_cap) {
-						max_spare_cap_cpu = cpu;
-						max_spare_cap = spare_cap;
-					}
-				}
+				if (best_cpu_util > util ||
+				    (best_cpu_util == util && cpu == task_cpu(task))) {
+					best_cpu_util = util;
+					best_cpu = cpu;
+				}
 			}
 		}
 
 		if (best_cpu != -1) {
 			return best_cpu;
-		} else if (max_spare_cap_cpu != -1) {
-			return max_spare_cap_cpu;
 		} else if (!cpumask_empty(&backup_search_cpu)) {
 			cpumask_copy(&search_cpu, &backup_search_cpu);
 			cpumask_clear(&backup_search_cpu);
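With the cstate-aware and spare-capacity heuristics removed, RT placement reduces to: skip any CPU that the waking task's own utilization (tutil) would push over the capacity margin, then take the least-utilized remaining CPU, breaking ties in favour of the task's current CPU. A self-contained sketch of that selection rule, assuming hypothetical per-CPU figures in place of cpu_util()/task_util() and omitting the isolation and irqload filters the kernel loop keeps:

#include <stdbool.h>
#include <stdio.h>
#include <limits.h>

#define NR_CPUS 4

/* Hypothetical snapshot of per-CPU state; the kernel reads these from
 * cpu_util() and capacity_orig_of(). */
static const unsigned long util_of[NR_CPUS] = { 300, 120, 120, 900 };
static const unsigned long cap_of[NR_CPUS]  = { 1024, 1024, 1024, 1024 };
static const unsigned long margin = 1280;	/* example margin value */

static bool overutilized(int cpu, unsigned long util)
{
	return cap_of[cpu] * 1024 < util * margin;
}

/* Mirrors the selection left in find_lowest_rq() after this change. */
static int pick_cpu(unsigned long tutil, int task_cpu)
{
	unsigned long best_util = ULONG_MAX;
	int best_cpu = -1;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		unsigned long util = util_of[cpu];

		/* Would this CPU cross the margin with the task on it? */
		if (overutilized(cpu, util + tutil))
			continue;

		/* Lowest utilization wins; ties go to the task's own CPU. */
		if (best_util > util ||
		    (best_util == util && cpu == task_cpu)) {
			best_util = util;
			best_cpu = cpu;
		}
	}
	return best_cpu;
}

int main(void)
{
	/* CPUs 1 and 2 tie at util 120 and the tie goes to the task's
	 * current CPU (2); CPU 3 is skipped since 900 + 100 is past the
	 * ~80% margin implied by 1280/1024. */
	printf("picked CPU %d\n", pick_cpu(100, 2));	/* prints 2 */
	return 0;
}

Note that the per-task headroom check is what the sched.h export below enables: rt.c can now call __cpu_overutilized() with util + tutil instead of the parameterless cpu_overutilized().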
kernel/sched/sched.h  +1 −0

@@ -1457,6 +1457,7 @@
 extern void trigger_load_balance(struct rq *rq);
 
 extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
 
+bool __cpu_overutilized(int cpu, unsigned long util);
 bool cpu_overutilized(int cpu);
 #endif