kernel/sched/fair.c (+3 −11)

```diff
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5593,13 +5593,6 @@ static unsigned long __cpu_norm_util(int cpu, unsigned long capacity, int delta)
 	return DIV_ROUND_UP(util << SCHED_CAPACITY_SHIFT, capacity);
 }
 
-static inline bool bias_to_waker_cpu_enabled(struct task_struct *wakee,
-					     struct task_struct *waker)
-{
-	return task_util(waker) > sched_big_waker_task_load &&
-	       task_util(wakee) < sched_small_wakee_task_load;
-}
-
 static inline bool
 bias_to_waker_cpu(struct task_struct *p, int cpu, struct cpumask *rtg_target)
 {
@@ -6954,7 +6947,6 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
 	struct related_thread_group *grp;
 	cpumask_t search_cpus;
 	int prev_cpu = task_cpu(p);
-	struct task_struct *curr = cpu_rq(cpu)->curr;
 #ifdef CONFIG_SCHED_CORE_ROTATE
 	bool do_rotate = false;
 	bool avoid_prev_cpu = false;
@@ -6976,13 +6968,13 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
 	curr_util = boosted_task_util(cpu_rq(cpu)->curr);
 
 	need_idle = wake_to_idle(p) || schedtune_prefer_idle(p);
 	if (need_idle)
 		sync = 0;
 
 	grp = task_related_thread_group(p);
 	if (grp && grp->preferred_cluster)
 		rtg_target = &grp->preferred_cluster->cpus;
 
-	if (sync && bias_to_waker_cpu_enabled(p, curr) &&
-	    bias_to_waker_cpu(p, cpu, rtg_target)) {
+	if (sync && bias_to_waker_cpu(p, cpu, rtg_target)) {
 		trace_sched_task_util_bias_to_waker(p, prev_cpu,
 				task_util(p), cpu, cpu, 0, need_idle);
 		return cpu;
```
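The deleted bias_to_waker_cpu_enabled() helper gated the sync-wakeup bias on relative task sizes: the bias applied only when a big waker woke a small wakee. With it gone, any sync wakeup that passes bias_to_waker_cpu() lands on the waker's CPU regardless of size. A minimal user-space sketch of the removed predicate, using the thresholds the deleted walt.c code computed (the utilization figures in main() are made-up examples):

```c
#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10	/* capacity scale = 1024 */

/* Thresholds as the deleted walt.c code computed them: 25% and 10%. */
static const unsigned int sched_big_waker_task_load =
	(25UL << SCHED_CAPACITY_SHIFT) / 100;	/* 256 */
static const unsigned int sched_small_wakee_task_load =
	(10UL << SCHED_CAPACITY_SHIFT) / 100;	/* 102 */

/* The removed gate: bias only when a big waker wakes a small wakee. */
static bool bias_to_waker_cpu_enabled(unsigned int wakee_util,
				      unsigned int waker_util)
{
	return waker_util > sched_big_waker_task_load &&
	       wakee_util < sched_small_wakee_task_load;
}

int main(void)
{
	/* Big waker (300 > 256) waking a small wakee (80 < 102): biased. */
	printf("%d\n", bias_to_waker_cpu_enabled(80, 300));	/* 1 */
	/* Small waker (100 <= 256): the old code skipped the bias. */
	printf("%d\n", bias_to_waker_cpu_enabled(80, 100));	/* 0 */
	return 0;
}
```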
kernel/sched/rt.c (+1 −2)

```diff
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1824,11 +1824,9 @@ static int find_lowest_rq(struct task_struct *task)
 		cpumask_andnot(&backup_search_cpu, &backup_search_cpu,
 			       &search_cpu);
 
-#ifdef CONFIG_SCHED_CORE_ROTATE
 		cpu = find_first_cpu_bit(task, &search_cpu, sg_target,
 					 &avoid_prev_cpu, &do_rotate,
 					 &first_cpu_bit_env);
-#endif
 	} else {
 		cpumask_copy(&search_cpu, lowest_mask);
 		cpumask_clear(&backup_search_cpu);
@@ -1912,6 +1910,7 @@ static int find_lowest_rq(struct task_struct *task)
 		} else if (!cpumask_empty(&backup_search_cpu)) {
 			cpumask_copy(&search_cpu, &backup_search_cpu);
 			cpumask_clear(&backup_search_cpu);
+			cpu = -1;
 			goto retry;
 		}
 	}
```
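Two independent fixes here. Dropping the #ifdef works because sched.h now supplies a stub for the !CONFIG_SCHED_CORE_ROTATE build (next hunk). The added cpu = -1 resets the scan cursor before retrying with the backup mask, so the second pass starts from CPU 0 instead of resuming wherever the failed first pass left off. A standalone sketch of that retry pattern (illustrative names and loads, not the kernel API) showing why the reset matters:

```c
#include <stdio.h>

#define NR_CPUS 8

int main(void)
{
	int load[NR_CPUS] = { 9, 9, 9, 9, 3, 9, 9, 2 };
	int limit = 1;		/* primary criteria: nearly idle */
	int pass = 0;
	int cpu = -1;

retry:
	/* Scan resumes after `cpu`, mirroring a hint-based CPU search. */
	for (cpu++; cpu < NR_CPUS; cpu++)
		if (load[cpu] < limit)
			break;

	if (cpu >= NR_CPUS && pass++ == 0) {
		limit = 5;	/* backup criteria: lightly loaded */
		cpu = -1;	/* the fix: restart the scan from CPU 0;
				 * without this, the retry resumes past the
				 * last CPU and scans nothing */
		goto retry;
	}

	if (cpu < NR_CPUS)
		printf("picked cpu %d\n", cpu);	/* prints: picked cpu 4 */
	else
		printf("no cpu found\n");
	return 0;
}
```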
kernel/sched/sched.h (+2 −0)

```diff
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2853,4 +2853,6 @@
 int find_first_cpu_bit(struct task_struct *p, const cpumask_t *search_cpus,
 		       struct sched_group *sg_target, bool *avoid_prev_cpu,
 		       bool *do_rotate, struct find_first_cpu_bit_env *env);
+#else
+#define find_first_cpu_bit(...) -1
 #endif
```
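The two added lines give the existing #ifdef CONFIG_SCHED_CORE_ROTATE block an #else branch: when the feature is compiled out, find_first_cpu_bit(...) expands to -1, so call sites such as the rt.c hunk above no longer need guards of their own. A minimal sketch of the pattern with made-up names (CONFIG_FEATURE_X, feature_x_pick):

```c
#include <stdio.h>

/* #define CONFIG_FEATURE_X */		/* toggle to compare both builds */

#ifdef CONFIG_FEATURE_X
int feature_x_pick(int hint, int max);	/* real implementation elsewhere */
#else
#define feature_x_pick(...) -1		/* feature off: "nothing found" */
#endif

int main(void)
{
	/* With the feature off, this compiles to: int cpu = -1; */
	int cpu = feature_x_pick(2, 8);

	if (cpu < 0)
		printf("fall back to the generic path\n");
	return 0;
}
```

One caveat with this pattern: when the stub macro is active, the arguments are discarded unevaluated, so call sites must not rely on side effects inside them.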
kernel/sched/walt.c (+3 −11)

```diff
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -162,13 +162,6 @@ static const unsigned int top_tasks_bitmap_size =
  */
 __read_mostly unsigned int sysctl_sched_freq_reporting_policy;
 
-#define SCHED_BIG_WAKER_TASK_LOAD_PCT	25UL
-#define SCHED_SMALL_WAKEE_TASK_LOAD_PCT	10UL
-
-__read_mostly unsigned int sched_big_waker_task_load;
-__read_mostly unsigned int sched_small_wakee_task_load;
-
 static int __init set_sched_ravg_window(char *str)
 {
 	unsigned int window_size;
@@ -3121,8 +3114,7 @@ void walt_sched_init(struct rq *rq)
 	walt_cpu_util_freq_divisor =
 	    (sched_ravg_window >> SCHED_CAPACITY_SHIFT) * 100;
 
-	sched_big_waker_task_load =
-	    (SCHED_BIG_WAKER_TASK_LOAD_PCT << SCHED_CAPACITY_SHIFT) / 100;
-	sched_small_wakee_task_load =
-	    (SCHED_SMALL_WAKEE_TASK_LOAD_PCT << SCHED_CAPACITY_SHIFT) / 100;
 	sched_init_task_load_windows =
 	    div64_u64((u64)sysctl_sched_init_task_load_pct *
 		      (u64)sched_ravg_window, 100);
 }
```
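For reference, the deleted tunables were fixed-point percentages of capacity: (25UL << 10) / 100 = 256 and (10UL << 10) / 100 = 102. The surviving sched_init_task_load_windows line is the same percent-based arithmetic over the WALT window. A standalone sketch with assumed example values (the 20 ms window and 15% sysctl figure are illustrative, not taken from this diff):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A new task's initial demand is a percentage of the ravg window. */
	uint64_t sched_ravg_window = 20000000ULL;	/* 20 ms in ns (example) */
	uint64_t init_task_load_pct = 15;		/* example sysctl value */

	/* div64_u64() in the kernel is plain 64-bit division here. */
	uint64_t sched_init_task_load_windows =
	    init_task_load_pct * sched_ravg_window / 100;

	printf("init task load: %llu ns per window\n",	/* 3000000 */
	       (unsigned long long)sched_init_task_load_windows);
	return 0;
}
```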