include/linux/sched/sysctl.h +1 −5

@@ -24,13 +24,9 @@
 extern unsigned int sysctl_sched_initial_task_util;
 extern unsigned int sysctl_sched_cstate_aware;
 extern unsigned int sysctl_sched_capacity_margin;
 extern unsigned int sysctl_sched_capacity_margin_down;
-#ifdef CONFIG_SCHED_WALT
-extern unsigned int sysctl_sched_use_walt_cpu_util;
-extern unsigned int sysctl_sched_use_walt_task_util;
-extern unsigned int sysctl_sched_init_task_load_pct;
-#endif
 #ifdef CONFIG_SCHED_WALT
+extern unsigned int sysctl_sched_init_task_load_pct;
 extern unsigned int sysctl_sched_cpu_high_irqload;
 extern unsigned int sysctl_sched_use_walt_cpu_util;
 extern unsigned int sysctl_sched_use_walt_task_util;
kernel/sched/fair.c +0 −12

@@ -5323,18 +5323,6 @@ static unsigned long __cpu_norm_util(int cpu, unsigned long capacity, int delta)
 	return DIV_ROUND_UP(util << SCHED_CAPACITY_SHIFT, capacity);
 }
 
-static inline int task_util(struct task_struct *p)
-{
-#ifdef CONFIG_SCHED_WALT
-	if (!walt_disabled && sysctl_sched_use_walt_task_util) {
-		u64 demand = p->ravg.demand;
-
-		return (demand << 10) / sched_ravg_window;
-	}
-#endif
-	return p->se.avg.util_avg;
-}
-
 static inline bool bias_to_waker_cpu(struct task_struct *p, int cpu,
 				     struct cpumask *rtg_target)
 {
kernel/sched/rt.c +0 −12

@@ -1713,18 +1713,6 @@ static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
-static inline unsigned long task_util(struct task_struct *p)
-{
-#ifdef CONFIG_SCHED_WALT
-	if (!walt_disabled && sysctl_sched_use_walt_task_util) {
-		u64 demand = p->ravg.demand;
-
-		return (demand << 10) / sched_ravg_window;
-	}
-#endif
-	return p->se.avg.util_avg;
-}
-
 static int find_lowest_rq(struct task_struct *task)
 {
 	struct sched_domain *sd;
kernel/sched/sched.h +10 −1

@@ -1704,9 +1704,18 @@ static inline unsigned long capacity_orig_of(int cpu)
 	return cpu_rq(cpu)->cpu_capacity_orig;
 }
 
+extern unsigned int sysctl_sched_use_walt_cpu_util;
+extern unsigned int walt_disabled;
+static inline unsigned long task_util(struct task_struct *p)
+{
+#ifdef CONFIG_SCHED_WALT
+	if (!walt_disabled && sysctl_sched_use_walt_task_util)
+		return p->ravg.demand / (sched_ravg_window >> SCHED_CAPACITY_SHIFT);
+#endif
+	return p->se.avg.util_avg;
+}
 
 /*
  * cpu_util returns the amount of capacity of a CPU that is used by CFS
  * tasks. The unit of the return value must be the one of capacity so we can
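One arithmetic detail worth double-checking in the consolidated helper: the copies removed from fair.c and rt.c computed (demand << 10) / sched_ravg_window, while the sched.h version pre-shifts the divisor instead: demand / (sched_ravg_window >> SCHED_CAPACITY_SHIFT). With SCHED_CAPACITY_SHIFT == 10 the two agree exactly whenever sched_ravg_window is a multiple of 1024; otherwise the pre-shifted divisor is truncated, so the new form can round up where the old form rounded down. Below is a minimal user-space sketch of that comparison, not kernel code; the 20 ms window and the sample demand values are illustrative assumptions, not values taken from this patch.

/* Standalone sanity check (user space): compares the removed formula
 * with the consolidated sched.h formula for a few sample demands. */
#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT 10	/* 1024-based capacity scale */

int main(void)
{
	/* assumed window: 20 ms in ns; WALT sets the real value at runtime */
	uint64_t sched_ravg_window = 20000000ULL;
	uint64_t demands[] = { 1000000ULL, 10000000ULL, 19999999ULL, 20000000ULL };

	for (size_t i = 0; i < sizeof(demands) / sizeof(demands[0]); i++) {
		uint64_t demand = demands[i];
		/* old form (fair.c / rt.c, now removed) */
		uint64_t old_util = (demand << 10) / sched_ravg_window;
		/* new form (sched.h): divisor is shifted once up front */
		uint64_t new_util = demand /
				(sched_ravg_window >> SCHED_CAPACITY_SHIFT);

		printf("demand=%8llu  old=%4llu  new=%4llu\n",
		       (unsigned long long)demand,
		       (unsigned long long)old_util,
		       (unsigned long long)new_util);
	}
	return 0;
}

On these inputs the two forms diverge only for demand just under a full window (old gives 1023, new gives 1024), because 20,000,000 >> 10 truncates to 19,531; for demands well below the window the results match.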