kernel/sched/sched.h +2 −2

@@ -1965,7 +1965,7 @@ cpu_util_freq_pelt(int cpu)
 }
 
 #ifdef CONFIG_SCHED_WALT
-extern atomic64_t walt_irq_work_lastq_ws;
+extern u64 walt_load_reported_window;
 
 static inline unsigned long
 cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
@@ -2003,7 +2003,7 @@ cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
 		walt_load->prev_window_util = util;
 		walt_load->nl = nl;
 		walt_load->pl = pl;
-		walt_load->ws = atomic64_read(&walt_irq_work_lastq_ws);
+		walt_load->ws = walt_load_reported_window;
 	}
 
 	return (util >= capacity) ? capacity : util;
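What the sched.h side changes: cpu_util_freq_walt() no longer reads the atomic walt_irq_work_lastq_ws directly; the window start it reports is now the plain u64 snapshot walt_load_reported_window, which (per the walt.c hunks below) only advances from walt_irq_work() while every runqueue lock is held. Below is a consumer-side sketch, not kernel code: the struct fields come from the diff, but the field meanings in the comments (nl = new-task load, pl = predicted load, per the usual WALT naming) and the demo_effective_util() policy helper are assumptions for illustration.

```c
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel struct; the field set is taken from the diff. */
struct sched_walt_cpu_load {
	unsigned long prev_window_util;	/* utilization of the previous window */
	unsigned long nl;		/* new-task load (assumed meaning) */
	unsigned long pl;		/* predicted load (assumed meaning) */
	uint64_t ws;			/* window start, now the latched snapshot */
};

/* Hypothetical policy helper: let a predicted-load spike pre-empt ramp-up. */
static unsigned long demo_effective_util(const struct sched_walt_cpu_load *wl)
{
	unsigned long util = wl->prev_window_util;

	if (wl->pl > util)
		util = wl->pl;
	return util;
}

int main(void)
{
	struct sched_walt_cpu_load wl = {
		.prev_window_util = 300,
		.nl = 40,
		.pl = 512,
		.ws = 1000000ULL,
	};

	printf("window start %llu: effective util %lu\n",
	       (unsigned long long)wl.ws, demo_effective_util(&wl));
	return 0;
}
```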
kernel/sched/walt.c +6 −2

@@ -45,7 +45,8 @@ const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP",
 static struct cpu_cycle_counter_cb cpu_cycle_counter_cb;
 static bool use_cycle_counter;
 DEFINE_MUTEX(cluster_lock);
-atomic64_t walt_irq_work_lastq_ws;
+static atomic64_t walt_irq_work_lastq_ws;
+u64 walt_load_reported_window;
 static struct irq_work walt_cpufreq_irq_work;
 static struct irq_work walt_migration_irq_work;
@@ -850,6 +851,9 @@ void set_window_start(struct rq *rq)
 		rq->window_start = 1;
 		sync_cpu_available = 1;
 		atomic64_set(&walt_irq_work_lastq_ws, rq->window_start);
+		walt_load_reported_window =
+				atomic64_read(&walt_irq_work_lastq_ws);
+
 	} else {
 		struct rq *sync_rq = cpu_rq(cpumask_any(cpu_online_mask));
@@ -3074,7 +3078,7 @@ void walt_irq_work(struct irq_work *irq_work)
 		raw_spin_lock(&cpu_rq(cpu)->lock);
 
 	wc = ktime_get_ns();
-
+	walt_load_reported_window = atomic64_read(&walt_irq_work_lastq_ws);
 	for_each_sched_cluster(cluster) {
 		u64 aggr_grp_load = 0;
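The walt.c side narrows walt_irq_work_lastq_ws to file scope and publishes walt_load_reported_window at exactly two points: once at boot in set_window_start() when the sync CPU seeds the first window, and once per irq-work pass after all runqueue locks are taken and before per-cluster loads are aggregated. The apparent intent is that a governor reading under a runqueue lock sees a window start paired with the load totals computed in the same pass, rather than a window that has already rolled over underneath it. A minimal userspace sketch of that latching pattern, assuming a simplified model (all demo_* names are hypothetical; a pthread mutex stands in for holding every runqueue lock at once):

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t demo_lastq_ws;	/* producer side: lock-free rollover */
static uint64_t demo_reported_ws;	/* consumer side: lock-protected pair */
static uint64_t demo_reported_util;
static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* Window-rollover path: may fire often, only advances the atomic. */
static void demo_roll_window(uint64_t new_ws)
{
	atomic_store(&demo_lastq_ws, new_ws);
}

/* Aggregation path (walt_irq_work analogue): latch ws and util together. */
static void demo_irq_work(uint64_t fresh_util)
{
	pthread_mutex_lock(&demo_lock);
	demo_reported_ws = atomic_load(&demo_lastq_ws);
	demo_reported_util = fresh_util;
	pthread_mutex_unlock(&demo_lock);
}

/* Governor path (cpu_util_freq_walt analogue): read a matched pair. */
static void demo_get_load(uint64_t *ws, uint64_t *util)
{
	pthread_mutex_lock(&demo_lock);
	*ws = demo_reported_ws;
	*util = demo_reported_util;
	pthread_mutex_unlock(&demo_lock);
}

int main(void)
{
	uint64_t ws, util;

	demo_roll_window(100);
	demo_irq_work(42);
	demo_roll_window(120);	/* rolled over, but not yet republished */
	demo_get_load(&ws, &util);

	/* Prints ws=100 util=42: the pair latched by the last irq work. */
	printf("ws=%llu util=%llu\n",
	       (unsigned long long)ws, (unsigned long long)util);
	return 0;
}
```

The key property the sketch shows: demo_roll_window() can advance the atomic any number of times, but a consumer never observes a window start newer than the load snapshot it was latched with.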