include/linux/sched.h +5 −0

@@ -183,6 +183,7 @@ extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
 				     unsigned int *max_nr,
 				     unsigned int *big_max_nr);
 extern unsigned int sched_get_cpu_util(int cpu);
+extern u64 sched_get_cpu_last_busy_time(int cpu);
 #else
 static inline void sched_update_nr_prod(int cpu, long delta, bool inc)
 {
@@ -196,6 +197,10 @@ static inline unsigned int sched_get_cpu_util(int cpu)
 {
 	return 0;
 }
+static inline u64 sched_get_cpu_last_busy_time(int cpu)
+{
+	return 0;
+}
 #endif
 
 extern void calc_global_load(unsigned long ticks);

kernel/sched/sched.h +7 −0

@@ -2483,6 +2483,11 @@ static inline bool is_max_capacity_cpu(int cpu)
 	return cpu_max_possible_capacity(cpu) == max_possible_capacity;
 }
 
+static inline bool is_min_capacity_cpu(int cpu)
+{
+	return cpu_max_possible_capacity(cpu) == min_max_possible_capacity;
+}
+
 /*
  * 'load' is in reference to "best cpu" at its best frequency.
  * Scale that in reference to a given cpu, accounting for how bad it is
@@ -2682,7 +2687,9 @@ static inline int sched_boost(void)
 	return 0;
 }
 
+static inline bool hmp_capable(void) { return false; }
 static inline bool is_max_capacity_cpu(int cpu) { return true; }
+static inline bool is_min_capacity_cpu(int cpu) { return true; }
 
 static inline int
 preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)

kernel/sched/sched_avg.c +30 −0

@@ -33,6 +33,8 @@ static DEFINE_PER_CPU(unsigned long, iowait_prod_sum);
 static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
 static s64 last_get_time;
 
+static DEFINE_PER_CPU(atomic64_t, last_busy_time) = ATOMIC64_INIT(0);
+
 #define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
 
 /**
  * sched_get_nr_running_avg
@@ -120,6 +122,27 @@ void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
 }
 EXPORT_SYMBOL(sched_get_nr_running_avg);
 
+#define BUSY_NR_RUN		3
+#define BUSY_LOAD_FACTOR	2
+static inline void update_last_busy_time(int cpu, bool dequeue,
+				unsigned long prev_nr_run, u64 curr_time)
+{
+	bool nr_run_trigger = false, load_trigger = false;
+
+	if (!hmp_capable() || is_min_capacity_cpu(cpu))
+		return;
+
+	if (prev_nr_run >= BUSY_NR_RUN && per_cpu(nr, cpu) < BUSY_NR_RUN)
+		nr_run_trigger = true;
+
+	if (dequeue && (cpu_util(cpu) * BUSY_LOAD_FACTOR) >
+			capacity_orig_of(cpu))
+		load_trigger = true;
+
+	if (nr_run_trigger || load_trigger)
+		atomic64_set(&per_cpu(last_busy_time, cpu), curr_time);
+}
+
 /**
  * sched_update_nr_prod
  * @cpu: The core id of the nr running driver.
@@ -148,6 +171,8 @@ void sched_update_nr_prod(int cpu, long delta, bool inc)
 	if (per_cpu(nr, cpu) > per_cpu(nr_max, cpu))
 		per_cpu(nr_max, cpu) = per_cpu(nr, cpu);
 
+	update_last_busy_time(cpu, !inc, nr_running, curr_time);
+
 	per_cpu(nr_prod_sum, cpu) += nr_running * diff;
 	per_cpu(nr_big_prod_sum, cpu) += nr_eligible_big_tasks(cpu) * diff;
 	per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
@@ -184,3 +209,8 @@ unsigned int sched_get_cpu_util(int cpu)
 	busy = (util * 100) / capacity;
 	return busy;
 }
+
+u64 sched_get_cpu_last_busy_time(int cpu)
+{
+	return atomic64_read(&per_cpu(last_busy_time, cpu));
+}
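For context, a minimal sketch of how a client (for example a core-control or hotplug driver) might consume the new export. The timestamps returned are in the same sched_clock() time base used for curr_time in sched_update_nr_prod(). The grace-period value and the helper name can_isolate_cpu() are illustrative assumptions for this example only, not part of the patch; the patch itself adds only the per-CPU bookkeeping and the sched_get_cpu_last_busy_time() accessor.

/*
 * Illustrative consumer sketch (not part of this patch): avoid isolating
 * a CPU that was flagged busy within the last 100 ms. BUSY_GRACE_NS and
 * can_isolate_cpu() are hypothetical names used only for this example.
 */
#include <linux/sched.h>
#include <linux/ktime.h>

#define BUSY_GRACE_NS	(100 * NSEC_PER_MSEC)

static bool can_isolate_cpu(int cpu)
{
	u64 now = sched_clock();
	u64 last_busy = sched_get_cpu_last_busy_time(cpu);

	/* 0 means the CPU has never been flagged busy since boot */
	if (last_busy && (now - last_busy) < BUSY_GRACE_NS)
		return false;

	return true;
}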