include/linux/sched.h +1 −0

@@ -3882,6 +3882,7 @@ static inline unsigned long rlimit_max(unsigned int limit)
 #define SCHED_CPUFREQ_IOWAIT           (1U << 2)
 #define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3)
 #define SCHED_CPUFREQ_WALT             (1U << 4)
+#define SCHED_CPUFREQ_PL               (1U << 5)
 #define SCHED_CPUFREQ_RT_DL    (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
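The new flag takes the next free bit, so a caller can OR it with SCHED_CPUFREQ_WALT and a consumer can test it together with SCHED_CPUFREQ_INTERCLUSTER_MIG in a single mask. A minimal userspace sketch of that composition (the values mirror the defines above; nothing here is kernel code):

#include <stdio.h>

#define SCHED_CPUFREQ_IOWAIT           (1U << 2)
#define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3)
#define SCHED_CPUFREQ_WALT             (1U << 4)
#define SCHED_CPUFREQ_PL               (1U << 5)

int main(void)
{
	/* The wakeup path passes both bits together. */
	unsigned int flags = SCHED_CPUFREQ_WALT | SCHED_CPUFREQ_PL;

	/* cpufreq_update_util() treats PL, like an inter-cluster
	 * migration, as exempt from the once-per-window rate limit. */
	unsigned int exception_flags = SCHED_CPUFREQ_INTERCLUSTER_MIG |
				       SCHED_CPUFREQ_PL;

	printf("flags=0x%x exempt=%s\n", flags,
	       (flags & exception_flags) ? "yes" : "no");
	return 0;
}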
kernel/sched/core.c +11 −0

@@ -2196,6 +2196,15 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 out:
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

+	if (success && sched_predl) {
+		raw_spin_lock_irqsave(&cpu_rq(cpu)->lock, flags);
+		if (do_pl_notif(cpu_rq(cpu)))
+			cpufreq_update_util(cpu_rq(cpu),
+					    SCHED_CPUFREQ_WALT |
+					    SCHED_CPUFREQ_PL);
+		raw_spin_unlock_irqrestore(&cpu_rq(cpu)->lock, flags);
+	}
+
 	return success;
 }
@@ -9585,3 +9594,5 @@ void sched_exit(struct task_struct *p)
 	task_rq_unlock(rq, p, &rf);
 }
 #endif /* CONFIG_SCHED_WALT */
+
+__read_mostly bool sched_predl;
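The notification is gated three ways: the wakeup must have succeeded, the global sched_predl toggle must be set, and do_pl_notif() must judge the predicted load worth reporting; only then is the governor poked with WALT|PL, under the runqueue lock. A rough userspace sketch of that gating with the kernel internals stubbed out (the stub bodies are hypothetical, chosen only to exercise both paths):

#include <stdbool.h>
#include <stdio.h>

static bool sched_predl = true;       /* global enable, set elsewhere */

static bool do_pl_notif_stub(int cpu) /* stands in for do_pl_notif() */
{
	return cpu == 0;    /* pretend CPU 0 predicts a big ramp-up */
}

static void try_to_wake_up_tail(bool success, int cpu)
{
	/* Only after a successful wakeup, and only when predictive load
	 * is enabled, consider notifying cpufreq. The real code holds
	 * cpu_rq(cpu)->lock around the check-and-notify. */
	if (success && sched_predl) {
		if (do_pl_notif_stub(cpu))
			printf("cpu%d: cpufreq_update_util(WALT|PL)\n", cpu);
	}
}

int main(void)
{
	try_to_wake_up_tail(true, 0);   /* notifies  */
	try_to_wake_up_tail(true, 1);   /* filtered  */
	try_to_wake_up_tail(false, 0);  /* no wakeup */
	return 0;
}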
kernel/sched/sched.h +12 −2

@@ -25,6 +25,8 @@
 struct rq;
 struct cpuidle_state;

+extern __read_mostly bool sched_predl;
+
 #ifdef CONFIG_SCHED_WALT
 extern unsigned int sched_ravg_window;
@@ -1802,13 +1804,18 @@ cpu_util_freq(int cpu, struct sched_walt_cpu_load *walt_load)
 	if (walt_load) {
 		u64 nl = cpu_rq(cpu)->nt_prev_runnable_sum +
 				rq->grp_time.nt_prev_runnable_sum;
+		u64 pl = rq->walt_stats.pred_demands_sum;

 		nl = div64_u64(nl, sched_ravg_window >> SCHED_CAPACITY_SHIFT);
+		pl = div64_u64(pl, sched_ravg_window >> SCHED_CAPACITY_SHIFT);

 		walt_load->prev_window_util = util;
 		walt_load->nl = nl;
-		walt_load->pl = 0;
+		walt_load->pl = pl;
 		rq->old_busy_time = util;
+		rq->old_estimated_time = pl;
 		walt_load->ws = rq->window_start;
 	}
 }
@@ -2230,6 +2237,9 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
 	struct update_util_data *data;
 #ifdef CONFIG_SCHED_WALT
+	unsigned int exception_flags = SCHED_CPUFREQ_INTERCLUSTER_MIG |
+				       SCHED_CPUFREQ_PL;
+
 	/*
 	 * Skip if we've already reported, but not if this is an inter-cluster
 	 * migration. Also only allow WALT update sites.
	 */
@@ -2238,7 +2248,7 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
 		return;
 	if (!sched_disable_window_stats &&
 		(rq->load_reported_window == rq->window_start) &&
-		!(flags & SCHED_CPUFREQ_INTERCLUSTER_MIG))
+		!(flags & exception_flags))
 		return;
 	rq->load_reported_window = rq->window_start;
 #endif
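pred_demands_sum is in nanoseconds, like the runnable sums, so it gets the same scaling as nl: dividing by sched_ravg_window >> SCHED_CAPACITY_SHIFT maps a fully busy window to roughly 1024 capacity units. A worked standalone example, assuming the 20 ms window these kernels default to (plain / stands in for div64_u64 in userspace):

#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10

int main(void)
{
	uint64_t sched_ravg_window = 20000000ULL; /* 20 ms in ns (assumed) */
	uint64_t pl = 10000000ULL;                /* 10 ms predicted busy  */

	/* window >> 10 is "ns per capacity unit", so a window that is
	 * fully busy maps to ~1024. Here: 10000000 / 19531 = 512. */
	uint64_t util = pl / (sched_ravg_window >> SCHED_CAPACITY_SHIFT);

	printf("pred_demands_sum=%llu ms -> util=%llu (of 1024)\n",
	       (unsigned long long)(pl / 1000000ULL),
	       (unsigned long long)util);
	return 0;
}

Stashing the scaled value in rq->old_estimated_time is what lets do_pl_notif() later compare a fresh prediction against what the governor has already seen.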
kernel/sched/walt.c +27 −2

@@ -116,8 +116,6 @@ __read_mostly unsigned int sysctl_sched_cpu_high_irqload = (10 * NSEC_PER_MSEC);
  * IMPORTANT: Initialize both copies to same value!!
  */

-static __read_mostly bool sched_predl;
-
 __read_mostly unsigned int sched_ravg_hist_size = 5;
 __read_mostly unsigned int sysctl_sched_ravg_hist_size = 5;
@@ -1279,6 +1277,33 @@ static inline u64 scale_exec_time(u64 delta, struct rq *rq)
 	return delta;
 }

+/*
+ * Convert busy time to frequency equivalent.
+ * Assumes load is scaled to 1024.
+ */
+static inline unsigned int load_to_freq(struct rq *rq, u64 load)
+{
+	return mult_frac(cpu_max_possible_freq(cpu_of(rq)), load,
+			 capacity_orig_of(cpu_of(rq)));
+}
+
+bool do_pl_notif(struct rq *rq)
+{
+	u64 prev = rq->old_busy_time;
+	u64 pl = rq->walt_stats.pred_demands_sum;
+	int cpu = cpu_of(rq);
+
+	/* If already at max freq, bail out. */
+	if (capacity_orig_of(cpu) == capacity_curr_of(cpu))
+		return false;
+
+	prev = max(prev, rq->old_estimated_time);
+
+	pl = div64_u64(pl, sched_ravg_window >> SCHED_CAPACITY_SHIFT);
+
+	/* 400 MHz filter. */
+	return (pl > prev) && (load_to_freq(rq, pl - prev) > 400000);
+}
+
 static void rollover_cpu_window(struct rq *rq, bool full_window)
 {
 	u64 curr_sum = rq->curr_runnable_sum;
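do_pl_notif() fires only when the prediction is meaningfully above what was last reported: it bails if the CPU already runs at its highest frequency, takes the larger of the previously reported busy time and estimate as the baseline, and requires the difference to translate to more than 400 MHz (400000 kHz, in cpufreq units). The kernel's mult_frac() splits the multiply-divide into quotient and remainder so x * n cannot overflow. A standalone sketch of the arithmetic with assumed numbers (the 1.8 GHz fmax and the sample utilizations are illustrative, not from the patch):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's mult_frac(): x * n / d without overflowing x * n. */
static uint64_t mult_frac64(uint64_t x, uint64_t n, uint64_t d)
{
	uint64_t q = x / d, r = x % d;

	return q * n + r * n / d;
}

/* Linear map from capacity units (0..1024) to kHz, as in load_to_freq(). */
static uint64_t load_to_freq(uint64_t fmax_khz, uint64_t load)
{
	return mult_frac64(fmax_khz, load, 1024);
}

int main(void)
{
	uint64_t fmax = 1800000; /* cpu_max_possible_freq in kHz (assumed) */
	uint64_t prev = 300;     /* max(old_busy_time, old_estimated_time) */
	uint64_t pl = 640;       /* scaled pred_demands_sum at wakeup      */

	/* Notify only when the prediction exceeds the last report by
	 * more than a 400 MHz equivalent: 1800000 * 340 / 1024 ~ 597656. */
	uint64_t delta = pl > prev ? load_to_freq(fmax, pl - prev) : 0;

	printf("delta = %llu kHz -> %s\n", (unsigned long long)delta,
	       delta > 400000 ? "notify" : "filtered");
	return 0;
}

The early bail-out compares capacity_orig_of() against capacity_curr_of(): when they match, the CPU is already at its top frequency and a notification could not raise it further.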
kernel/sched/walt.h +2 −0

@@ -162,6 +162,7 @@ extern void mark_task_starting(struct task_struct *p);
 extern void set_window_start(struct rq *rq);
 void account_irqtime(int cpu, struct task_struct *curr, u64 delta,
		      u64 wallclock);
+extern bool do_pl_notif(struct rq *rq);

 #define SCHED_HIGH_IRQ_TIMEOUT 3
 static inline u64 sched_irqload(int cpu)
@@ -349,6 +350,7 @@ static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
 }
 static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; }
+static inline bool do_pl_notif(struct rq *rq) { return false; }

 static inline void inc_rq_walt_stats(struct rq *rq, struct task_struct *p) { }
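The header pairs the real declaration with a constant-false inline stub for !CONFIG_SCHED_WALT builds, so the new call site in try_to_wake_up() needs no #ifdef and the compiler removes the dead branch entirely. A sketch of that pattern (the CONFIG macro is toggled by hand here for illustration, and the struct and function bodies are placeholders):

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_SCHED_WALT 1     /* comment out to get the stub build */

struct rq { int cpu; };

#ifdef CONFIG_SCHED_WALT
static bool do_pl_notif(struct rq *rq)
{
	return rq->cpu == 0;    /* placeholder for the real heuristic */
}
#else
/* Constant-false inline: callers compile, the call site folds away. */
static inline bool do_pl_notif(struct rq *rq) { return false; }
#endif

int main(void)
{
	struct rq rq = { .cpu = 0 };

	printf("do_pl_notif: %d\n", do_pl_notif(&rq));
	return 0;
}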