kernel/sched/cpufreq_schedutil.c (+4 −2)

@@ -185,7 +185,9 @@ static void sugov_track_cycles(struct sugov_policy *sg_policy,
 	/* Track cycles in current window */
 	delta_ns = upto - sg_policy->last_cyc_update_time;
-	cycles = (prev_freq * delta_ns) / (NSEC_PER_SEC / KHZ);
+	delta_ns *= prev_freq;
+	do_div(delta_ns, (NSEC_PER_SEC / KHZ));
+	cycles = delta_ns;
 	sg_policy->curr_cycles += cycles;
 	sg_policy->last_cyc_update_time = upto;
 }
@@ -199,7 +201,7 @@ static void sugov_calc_avg_cap(struct sugov_policy *sg_policy, u64 curr_ws,
 	if (unlikely(!sysctl_sched_use_walt_cpu_util))
 		return;

-	WARN_ON(curr_ws < last_ws);
+	BUG_ON(curr_ws < last_ws);
 	if (curr_ws <= last_ws)
 		return;