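In short: the patch adds a sched_irq_work_queue() helper to kernel/sched/sched.h and converts three scheduler call sites to it, one in schedutil's deferred frequency update and two in WALT (task-migration and window-rollover notifications), so that irq_work raised from a CPU that is no longer online gets queued on another online CPU instead of the local one.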
kernel/sched/cpufreq_schedutil.c (+1 −1)

@@ -267,7 +267,7 @@ static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
 	if (use_pelt())
 		sg_policy->work_in_progress = true;
 
-	irq_work_queue(&sg_policy->irq_work);
+	sched_irq_work_queue(&sg_policy->irq_work);
 }
 
 #define TARGET_LOAD 80
kernel/sched/sched.h (+10 −0)

@@ -3096,3 +3096,13 @@ struct sched_avg_stats {
 	int nr_scaled;
 };
 extern void sched_get_nr_running_avg(struct sched_avg_stats *stats);
+
+#ifdef CONFIG_SMP
+static inline void sched_irq_work_queue(struct irq_work *work)
+{
+	if (likely(cpu_online(raw_smp_processor_id())))
+		irq_work_queue(work);
+	else
+		irq_work_queue_on(work, cpumask_any(cpu_online_mask));
+}
+#endif
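The helper above is the whole mechanism: irq_work_queue() always raises the work on the local CPU, so a caller running on a CPU that is on its way offline could see the work go unserviced; the helper detects that case and reroutes the work to an arbitrary online CPU via irq_work_queue_on(). A minimal usage sketch follows, assuming the caller sits inside kernel/sched/ (so it can see the scheduler-private sched.h); my_work, my_work_fn, my_setup and my_kick are hypothetical names for illustration only, not part of the patch.

#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/smp.h>
#include "sched.h"	/* assumed: this file lives in kernel/sched/ */

static void my_work_fn(struct irq_work *work)
{
	/* Runs in hard-IRQ context on whichever CPU accepted the work. */
}

static struct irq_work my_work;

static void my_setup(void)
{
	init_irq_work(&my_work, my_work_fn);
}

static void my_kick(void)
{
	/*
	 * A plain irq_work_queue() would target the local CPU; the
	 * helper falls back to cpumask_any(cpu_online_mask) when the
	 * local CPU is no longer online (e.g. mid-hotplug).
	 */
	sched_irq_work_queue(&my_work);
}

Two details worth noting: the helper uses raw_smp_processor_id() rather than smp_processor_id(), presumably so the check stays warning-free (if best-effort) from preemptible context, and it is only defined under CONFIG_SMP, matching irq_work_queue_on(); a !SMP build of these call sites would appear to need a fallback stub that simply calls irq_work_queue().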
kernel/sched/walt.c (+2 −2)

@@ -982,7 +982,7 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
 	if (!same_freq_domain(new_cpu, task_cpu(p))) {
 		src_rq->notif_pending = true;
 		dest_rq->notif_pending = true;
-		irq_work_queue(&walt_migration_irq_work);
+		sched_irq_work_queue(&walt_migration_irq_work);
 	}
 
 	if (is_ed_enabled()) {

@@ -2072,7 +2072,7 @@ static inline void run_walt_irq_work(u64 old_window_start, struct rq *rq)
 	result = atomic64_cmpxchg(&walt_irq_work_lastq_ws,
				  old_window_start, rq->window_start);
 	if (result == old_window_start)
-		irq_work_queue(&walt_cpufreq_irq_work);
+		sched_irq_work_queue(&walt_cpufreq_irq_work);
 }
 
 /* Reflect task activity on its demand and cpu's busy time statistics */