kernel/sched/walt.c  +4 −0

@@ -481,6 +481,7 @@ static inline u64 freq_policy_load(struct rq *rq)
 	struct sched_cluster *cluster = rq->cluster;
 	u64 aggr_grp_load = cluster->aggr_grp_load;
 	u64 load, tt_load = 0;
+	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu_of(rq));

 	if (rq->ed_task != NULL) {
 		load = sched_ravg_window;
@@ -492,6 +493,9 @@ static inline u64 freq_policy_load(struct rq *rq)
 	else
 		load = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;

+	if (cpu_ksoftirqd && cpu_ksoftirqd->state == TASK_RUNNING)
+		load = max_t(u64, load, task_load(cpu_ksoftirqd));
+
 	tt_load = top_task_load(rq);
 	switch (reporting_policy) {
 	case FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK:
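The added lines look up this CPU's ksoftirqd thread and, when that thread is runnable, raise the load reported to the frequency governor to at least ksoftirqd's own task load, so softirq-heavy periods are still reflected in frequency guidance. Below is a minimal userspace sketch of that clamping idea; the names (freq_report_load, cpu_load, ksoftirqd_load, ksoftirqd_running, max_u64) are illustrative stand-ins for this sketch, not the kernel's own symbols.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's max_t(u64, a, b). */
static inline uint64_t max_u64(uint64_t a, uint64_t b)
{
	return a > b ? a : b;
}

/*
 * Model of the change: the runnable-sum based CPU load is clamped up
 * to the ksoftirqd task's load whenever ksoftirqd is runnable on that
 * CPU, before the reporting-policy switch picks the value to report.
 */
static uint64_t freq_report_load(uint64_t cpu_load,
				 uint64_t ksoftirqd_load,
				 int ksoftirqd_running)
{
	if (ksoftirqd_running)
		cpu_load = max_u64(cpu_load, ksoftirqd_load);
	return cpu_load;
}

int main(void)
{
	/* ksoftirqd with 8 ms of demand while the rest of the CPU shows 3 ms. */
	printf("%llu\n",
	       (unsigned long long)freq_report_load(3000000ULL, 8000000ULL, 1));
	return 0;
}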