kernel/sched/core.c  +8 −0

@@ -2171,6 +2171,14 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags,
 out:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

+	if (success && sched_predl) {
+		raw_spin_lock_irqsave(&cpu_rq(cpu)->lock, flags);
+		if (do_pl_notif(cpu_rq(cpu)))
+			cpufreq_update_util(cpu_rq(cpu),
+				SCHED_CPUFREQ_WALT | SCHED_CPUFREQ_PL);
+		raw_spin_unlock_irqrestore(&cpu_rq(cpu)->lock, flags);
+	}
+
	return success;
 }
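The hunk above raises a predicted-load (PL) cpufreq notification straight from the wakeup path when sched_predl is enabled; cpu_rq(cpu)->lock is taken first because cpufreq_update_util() and the governor callbacks behind it expect the runqueue lock to be held. The sketch below shows how a governor-side hook might consume the flags raised here. It is illustrative only: example_update_util() is a hypothetical name, not the governor used in this tree, and the real hook may filter further.

/*
 * Hypothetical governor hook -- illustrative only, not code from this
 * tree. Assumption: the hook ignores non-WALT update sites and treats
 * SCHED_CPUFREQ_PL as a request to re-evaluate frequency immediately.
 */
static void example_update_util(struct update_util_data *data, u64 time,
				unsigned int flags)
{
	/* Only WALT-originated updates are of interest to this governor. */
	if (!(flags & SCHED_CPUFREQ_WALT))
		return;

	if (flags & SCHED_CPUFREQ_PL) {
		/* Predicted load rose: recompute and apply a new target. */
	}
}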
kernel/sched/sched.h  +3 −16

@@ -852,7 +852,6 @@ struct rq {
	int cstate, wakeup_latency, wakeup_energy;
	u64 window_start;
	s64 cum_window_start;
-	u64 load_reported_window;
	unsigned long walt_flags;

	u64 cur_irqload;

@@ -1966,6 +1965,8 @@ cpu_util_freq_pelt(int cpu)
 }

 #ifdef CONFIG_SCHED_WALT
+extern atomic64_t walt_irq_work_lastq_ws;
+
 static inline unsigned long
 cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
 {

@@ -2002,7 +2003,7 @@ cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
		walt_load->prev_window_util = util;
		walt_load->nl = nl;
		walt_load->pl = pl;
-		walt_load->ws = rq->load_reported_window;
+		walt_load->ws = atomic64_read(&walt_irq_work_lastq_ws);
	}

	return (util >= capacity) ? capacity : util;

@@ -2453,22 +2454,8 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
	struct update_util_data *data;

 #ifdef CONFIG_SCHED_WALT
-	unsigned int exception_flags = SCHED_CPUFREQ_INTERCLUSTER_MIG |
-				SCHED_CPUFREQ_PL | SCHED_CPUFREQ_EARLY_DET |
-				SCHED_CPUFREQ_FORCE_UPDATE;
-
-	/*
-	 * Skip if we've already reported, but not if this is an inter-cluster
-	 * migration. Also only allow WALT update sites.
-	 */
	if (!(flags & SCHED_CPUFREQ_WALT))
		return;
-	if (!sched_disable_window_stats &&
-	    (rq->load_reported_window == rq->window_start) &&
-	    !(flags & exception_flags))
-		return;
-	if (!(flags & exception_flags))
-		rq->load_reported_window = rq->window_start;
 #endif

	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
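Net effect of the sched.h changes: the per-rq load_reported_window throttle disappears from cpufreq_update_util() (only the SCHED_CPUFREQ_WALT origin check remains), and the window stamp handed to the governor in walt_load->ws now comes from the single global walt_irq_work_lastq_ws. Below is a minimal consumer-side sketch, assuming a governor that wants at most one sample per WALT window; all example_* names are hypothetical and not part of this tree.

/*
 * Hypothetical per-CPU governor state -- not from this tree. The only
 * point illustrated: one atomic64 stamp, published at window rollover,
 * lets every reader decide whether a load sample belongs to a new
 * window without any per-rq bookkeeping.
 */
struct example_gov_cpu {
	u64 last_ws;	/* window stamp of the last sample we accepted */
};

static bool example_sample_is_new(struct example_gov_cpu *gc,
				  struct sched_walt_cpu_load *wl)
{
	if (wl->ws == gc->last_ws)
		return false;	/* same window: load has not rolled over yet */

	gc->last_ws = wl->ws;
	return true;
}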
kernel/sched/walt.c  +1 −1

@@ -45,7 +45,7 @@ const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP",
 static struct cpu_cycle_counter_cb cpu_cycle_counter_cb;
 static bool use_cycle_counter;
 DEFINE_MUTEX(cluster_lock);
-static atomic64_t walt_irq_work_lastq_ws;
+atomic64_t walt_irq_work_lastq_ws;
 static struct irq_work walt_cpufreq_irq_work;
 static struct irq_work walt_migration_irq_work;
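Dropping the static qualifier here is what lets the extern declaration added in sched.h resolve, so the inline cpu_util_freq_walt() can read the stamp directly instead of going through the removed rq->load_reported_window. The sketch below shows the assumed producer side; this diff does not show where the stamp is actually written, so the publish point is an assumption about the WALT irq work, not confirmed code.

/*
 * Assumed producer side -- not shown in this diff. Assumption: the WALT
 * irq work publishes the start of the window it has just closed.
 * Readers use atomic64_read() (see cpu_util_freq_walt() in sched.h),
 * so no rq->lock is needed on the consumer path.
 */
static void example_publish_window_start(u64 closed_window_start)
{
	atomic64_set(&walt_irq_work_lastq_ws, closed_window_start);
}

Exporting the raw atomic, rather than wrapping it in an accessor, presumably keeps the read in the inline fast path down to a single atomic64_read().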