include/linux/sched.h (+1 −0)

```diff
@@ -3885,6 +3885,7 @@ static inline unsigned long rlimit_max(unsigned int limit)
 #define SCHED_CPUFREQ_RT	(1U << 0)
 #define SCHED_CPUFREQ_DL	(1U << 1)
 #define SCHED_CPUFREQ_IOWAIT	(1U << 2)
+#define SCHED_CPUFREQ_INTERCLUSTER_MIG	(1U << 3)
 
 #define SCHED_CPUFREQ_RT_DL	(SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
```
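The new flag extends the existing `SCHED_CPUFREQ_*` bit mask that the scheduler passes to cpufreq governor callbacks. A governor that cares about inter-cluster migrations can test the bit in its update hook. The sketch below is a hypothetical userspace model only; the handler name and the choice to treat the flag as a forced re-evaluation are assumptions, not something this patch defines:

```c
#include <stdint.h>
#include <stdio.h>

/* Flag bits as defined in include/linux/sched.h after this patch. */
#define SCHED_CPUFREQ_RT               (1U << 0)
#define SCHED_CPUFREQ_DL               (1U << 1)
#define SCHED_CPUFREQ_IOWAIT           (1U << 2)
#define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3)

/*
 * Hypothetical governor-side handler: an inter-cluster migration is
 * treated as a forced re-evaluation, since the load of both clusters
 * just changed and the previous frequency decision may be stale.
 */
static void governor_update(uint64_t time_ns, unsigned int flags)
{
	int forced = !!(flags & SCHED_CPUFREQ_INTERCLUSTER_MIG);

	printf("update @%llu ns, flags=%#x, forced=%d\n",
	       (unsigned long long)time_ns, flags, forced);
}

int main(void)
{
	governor_update(1000, 0);                              /* normal tick */
	governor_update(2000, SCHED_CPUFREQ_INTERCLUSTER_MIG); /* migration */
	return 0;
}
```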
kernel/sched/core.c (+6 −0)

```diff
@@ -2172,6 +2172,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	wallclock = sched_ktime_clock();
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 	update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+	cpufreq_update_util(rq, 0);
 	raw_spin_unlock(&rq->lock);
 
 	rcu_read_lock();
@@ -2264,6 +2265,7 @@ static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie
 		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 		update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+		cpufreq_update_util(rq, 0);
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 		note_task_waking(p, wallclock);
 	}
@@ -3370,6 +3372,8 @@ void scheduler_tick(void)
 	wallclock = sched_ktime_clock();
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+	cpufreq_update_util(rq, 0);
+
 	early_notif = early_detection_notify(rq, wallclock);
 	raw_spin_unlock(&rq->lock);
@@ -3704,6 +3708,7 @@ static void __sched notrace __schedule(bool preempt)
 	if (likely(prev != next)) {
 		update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
 		update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
+		cpufreq_update_util(rq, 0);
 		if (!is_idle_task(prev) && !prev->on_rq)
 			update_avg_burst(prev);
@@ -3717,6 +3722,7 @@ static void __sched notrace __schedule(bool preempt)
 		rq = context_switch(rq, prev, next, cookie); /* unlocks the rq */
 	} else {
 		update_task_ravg(prev, rq, TASK_UPDATE, wallclock, 0);
+		cpufreq_update_util(rq, 0);
 		lockdep_unpin_lock(&rq->lock, cookie);
 		raw_spin_unlock_irq(&rq->lock);
 	}
```
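Each new call site sits directly after the `update_task_ravg()` calls, so window statistics are fresh when the report goes out, and all of them dispatch through `cpufreq_update_util()`'s per-CPU `update_util_data` hook (registered in mainline via `cpufreq_add_update_util_hook()`). A minimal userspace model of that dispatch pattern, with a plain array standing in for the kernel's per-CPU machinery (the governor names here are illustrative):

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct update_util_data. */
struct update_util_data {
	void (*func)(struct update_util_data *data, uint64_t time,
		     unsigned int flags);
};

/* One hook pointer per CPU; NULL means no governor is attached. */
#define NR_CPUS 4
static struct update_util_data *cpu_hooks[NR_CPUS];

/* Roughly what cpufreq_update_util() does once any gating passes. */
static void cpufreq_update_util(int cpu, uint64_t now, unsigned int flags)
{
	struct update_util_data *data = cpu_hooks[cpu];

	if (data)
		data->func(data, now, flags);
}

static void my_governor_func(struct update_util_data *data, uint64_t time,
			     unsigned int flags)
{
	printf("governor notified: t=%llu flags=%#x\n",
	       (unsigned long long)time, flags);
}

int main(void)
{
	static struct update_util_data gov = { .func = my_governor_func };

	cpu_hooks[0] = &gov;               /* hook registration */
	cpufreq_update_util(0, 12345, 0);  /* e.g. from scheduler_tick() */
	cpufreq_update_util(1, 12345, 0);  /* no hook: silently skipped */
	return 0;
}
```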
kernel/sched/hmp.c (+5 −0)

```diff
@@ -3610,6 +3610,11 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
 	migrate_top_tasks(p, src_rq, dest_rq);
 
+	if (!same_freq_domain(new_cpu, task_cpu(p))) {
+		cpufreq_update_util(dest_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
+		cpufreq_update_util(src_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
+	}
+
 	if (p == src_rq->ed_task) {
 		src_rq->ed_task = NULL;
 		if (!dest_rq->ed_task)
```
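`fixup_busy_time()` runs when a task's accounted busy time moves between runqueues. The new block forces a load report on both ends, but only when the migration crosses frequency domains: an intra-cluster move changes neither cluster's aggregate load, so there is nothing to report. A userspace sketch of that decision, using a hypothetical CPU-to-domain mapping (two CPUs per cluster) purely for illustration:

```c
#include <stdio.h>

/* Hypothetical topology: CPUs 0-1 in cluster 0, CPUs 2-3 in cluster 1. */
static int freq_domain(int cpu)
{
	return cpu / 2;
}

static int same_freq_domain(int a, int b)
{
	return freq_domain(a) == freq_domain(b);
}

static void report_migration(int src_cpu, int dst_cpu)
{
	if (!same_freq_domain(dst_cpu, src_cpu)) {
		/* Both clusters' loads changed: force a report on each. */
		printf("forced update on domains %d and %d\n",
		       freq_domain(dst_cpu), freq_domain(src_cpu));
	} else {
		printf("intra-cluster move %d->%d: no forced update\n",
		       src_cpu, dst_cpu);
	}
}

int main(void)
{
	report_migration(0, 1); /* same cluster: nothing to report */
	report_migration(1, 2); /* inter-cluster: both sides updated */
	return 0;
}
```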
kernel/sched/sched.h (+14 −0)

```diff
@@ -79,6 +79,7 @@ struct cpu_cycle {
 	u64 time;
 };
 
+extern unsigned int sched_disable_window_stats;
 #endif /* CONFIG_SCHED_HMP */
@@ -770,6 +771,7 @@ struct rq {
 	int cstate, wakeup_latency, wakeup_energy;
 	u64 window_start;
+	u64 load_reported_window;
 	unsigned long hmp_flags;
 
 	u64 cur_irqload;
@@ -2142,6 +2144,18 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
 {
 	struct update_util_data *data;
 
+#ifdef CONFIG_SCHED_HMP
+	/*
+	 * Skip if we've already reported, but not if this is an inter-cluster
+	 * migration
+	 */
+	if (!sched_disable_window_stats &&
+	    (rq->load_reported_window == rq->window_start) &&
+	    !(flags & SCHED_CPUFREQ_INTERCLUSTER_MIG))
+		return;
+	rq->load_reported_window = rq->window_start;
+#endif
+
 	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
 	if (data)
 		data->func(data, rq_clock(rq), flags);
```
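The gating added to `cpufreq_update_util()` is the heart of the patch: with window-based load statistics, a CPU's reported load only changes once per window, so the new call sites in core.c collapse to at most one governor notification per window per CPU, except when an inter-cluster migration forces one through. A userspace sketch of the dedup logic, with simplified fields standing in for `struct rq` (a minimal model, not the kernel code itself):

```c
#include <stdint.h>
#include <stdio.h>

#define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3)

/* Simplified stand-in for the rq fields this patch touches. */
struct rq_model {
	uint64_t window_start;          /* start of current stats window */
	uint64_t load_reported_window;  /* window we last reported for */
};

static unsigned int sched_disable_window_stats; /* 0: stats enabled */

/* Mirrors the CONFIG_SCHED_HMP gating added to cpufreq_update_util(). */
static void cpufreq_update_util(struct rq_model *rq, unsigned int flags)
{
	if (!sched_disable_window_stats &&
	    rq->load_reported_window == rq->window_start &&
	    !(flags & SCHED_CPUFREQ_INTERCLUSTER_MIG))
		return;                 /* already reported this window */
	rq->load_reported_window = rq->window_start;

	printf("report: window=%llu flags=%#x\n",
	       (unsigned long long)rq->window_start, flags);
}

int main(void)
{
	struct rq_model rq = { .window_start = 100 };

	cpufreq_update_util(&rq, 0); /* first call in window: reports */
	cpufreq_update_util(&rq, 0); /* duplicate: suppressed */
	cpufreq_update_util(&rq, SCHED_CPUFREQ_INTERCLUSTER_MIG); /* forced */
	rq.window_start = 120;       /* window rolls over */
	cpufreq_update_util(&rq, 0); /* new window: reports again */
	return 0;
}
```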