kernel/sched/core.c +18 −4

```diff
@@ -1156,6 +1156,9 @@ __read_mostly unsigned int sched_ravg_window = 10000000;
 
 /* 1 -> use PELT based load stats, 0 -> use window-based load stats */
 unsigned int __read_mostly sched_use_pelt;
 
+/* Temporarily disable window-stats activity on all cpus */
+unsigned int __read_mostly sched_disable_window_stats;
+
 unsigned int max_possible_efficiency = 1024;
 unsigned int min_possible_efficiency = 1024;
@@ -1393,7 +1396,7 @@ static void update_task_ravg(struct task_struct *p, struct rq *rq,
 	u64 window_start;
 	s64 delta = 0;
 
-	if (sched_use_pelt || !rq->window_start)
+	if (sched_use_pelt || !rq->window_start || sched_disable_window_stats)
 		return;
 
 	lockdep_assert_held(&rq->lock);
@@ -1594,6 +1597,9 @@ static inline void mark_task_starting(struct task_struct *p)
 	struct rq *rq = task_rq(p);
 	u64 wallclock = sched_clock();
 
+	if (sched_disable_window_stats)
+		return;
+
 	if (!rq->window_start) {
 		p->ravg.partial_demand = 0;
 		p->ravg.demand = 0;
@@ -1698,9 +1704,11 @@ void sched_exit(struct task_struct *p)
 	/* rq->curr == p */
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0);
 	dequeue_task(rq, p, 0);
-	if (p->ravg.flags & CURR_WINDOW_CONTRIB)
+	if (!sched_disable_window_stats &&
+			(p->ravg.flags & CURR_WINDOW_CONTRIB))
 		rq->curr_runnable_sum -= p->ravg.partial_demand;
-	if (p->ravg.flags & PREV_WINDOW_CONTRIB)
+	if (!sched_disable_window_stats &&
+			(p->ravg.flags & PREV_WINDOW_CONTRIB))
 		rq->prev_runnable_sum -= p->ravg.demand;
 	BUG_ON((s64)rq->curr_runnable_sum < 0);
 	BUG_ON((s64)rq->prev_runnable_sum < 0);
@@ -2000,10 +2008,15 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
 	struct rq *src_rq = task_rq(p);
 	struct rq *dest_rq = cpu_rq(new_cpu);
 	u64 wallclock;
+	int freq_notify = 0;
 
 	if (p->state == TASK_WAKING)
 		double_rq_lock(src_rq, dest_rq);
 
+	if (sched_disable_window_stats)
+		goto done;
+
+	freq_notify = 1;
 	wallclock = sched_clock();
 
 	update_task_ravg(task_rq(p)->curr, task_rq(p),
@@ -2055,10 +2068,11 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
 	trace_sched_migration_update_sum(src_rq);
 	trace_sched_migration_update_sum(dest_rq);
 
+done:
 	if (p->state == TASK_WAKING)
 		double_rq_unlock(src_rq, dest_rq);
 
-	if (cpumask_test_cpu(new_cpu,
+	if (!freq_notify && cpumask_test_cpu(new_cpu,
 				&src_rq->freq_domain_cpumask))
 		return;
 
```
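The sched_exit() hunk is the subtle one: a task's earlier contribution to the per-rq sums is unwound only while window stats are active, presumably because once the stats have been quiesced and reset, the CURR/PREV flags no longer match the sums and subtracting would trip the BUG_ON underflow checks. Condensed into a standalone helper for illustration (remove_window_contrib() is a hypothetical name, not part of this patch):

```c
/*
 * Illustrative condensation of the patched sched_exit() accounting.
 * While sched_disable_window_stats is set, the task's window flags
 * may be stale relative to the (reset) per-rq sums, so the
 * subtraction is skipped to keep the sums from going negative.
 */
static void remove_window_contrib(struct rq *rq, struct task_struct *p)
{
	if (sched_disable_window_stats)
		return;

	if (p->ravg.flags & CURR_WINDOW_CONTRIB)
		rq->curr_runnable_sum -= p->ravg.partial_demand;
	if (p->ravg.flags & PREV_WINDOW_CONTRIB)
		rq->prev_runnable_sum -= p->ravg.demand;

	BUG_ON((s64)rq->curr_runnable_sum < 0);
	BUG_ON((s64)rq->prev_runnable_sum < 0);
}
```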
kernel/sched/fair.c +2 −2

```diff
@@ -1820,7 +1820,7 @@ done:
 
 void inc_nr_big_small_task(struct rq *rq, struct task_struct *p)
 {
-	if (!sched_enable_hmp)
+	if (!sched_enable_hmp || sched_disable_window_stats)
 		return;
 
 	if (is_big_task(p))
@@ -1831,7 +1831,7 @@ void inc_nr_big_small_task(struct rq *rq, struct task_struct *p)
 
 void dec_nr_big_small_task(struct rq *rq, struct task_struct *p)
 {
-	if (!sched_enable_hmp)
+	if (!sched_enable_hmp || sched_disable_window_stats)
 		return;
 
 	if (is_big_task(p))
```

kernel/sched/sched.h +3 −2

```diff
@@ -689,6 +689,7 @@
 extern void init_new_task_load(struct task_struct *p);
 extern unsigned int sched_ravg_window;
 extern unsigned int sched_use_pelt;
+extern unsigned int sched_disable_window_stats;
 extern unsigned int max_possible_freq;
 extern unsigned int min_max_freq;
 extern unsigned int pct_task_load(struct task_struct *p);
@@ -718,7 +719,7 @@ inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
 	if (sched_use_pelt)
 		rq->cumulative_runnable_avg +=
 				p->se.avg.runnable_avg_sum_scaled;
-	else
+	else if (!sched_disable_window_stats)
 		rq->cumulative_runnable_avg += p->ravg.demand;
 }
 
@@ -728,7 +729,7 @@ dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
 	if (sched_use_pelt)
 		rq->cumulative_runnable_avg -=
 				p->se.avg.runnable_avg_sum_scaled;
-	else
+	else if (!sched_disable_window_stats)
 		rq->cumulative_runnable_avg -= p->ravg.demand;
 	BUG_ON((s64)rq->cumulative_runnable_avg < 0);
 }
```
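With the sched.h change, cumulative_runnable_avg has three accounting modes rather than two. The per-task amount that inc/dec_cumulative_runnable_avg() apply can be written as one hypothetical helper to make the decision table explicit (task_load_contrib() is not in the patch, shown only for clarity):

```c
/*
 * Illustrative only: the amount inc/dec_cumulative_runnable_avg()
 * add to or subtract from rq->cumulative_runnable_avg under the
 * patched sched.h logic.
 */
static inline u64 task_load_contrib(struct task_struct *p)
{
	if (sched_use_pelt)			/* PELT-based load stats */
		return p->se.avg.runnable_avg_sum_scaled;
	if (!sched_disable_window_stats)	/* window-based load stats */
		return p->ravg.demand;
	return 0;	/* window stats quiesced: contribute nothing */
}
```

Note the implied constraint: a task enqueued while the flag is set contributes nothing, so if it were dequeued after the flag is cleared, the decrement would subtract a demand that was never added and hit the BUG_ON. The flag is therefore presumably only toggled while the relevant counters are reset under rq locks.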
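Nothing in this diff actually sets sched_disable_window_stats; the caller lives outside the visible hunks. Given the "temporarily disable" comment and the underflow checks above, a plausible usage pattern is sketched below, under the assumption that the flag is only flipped with every rq lock held and the window sums reset in between. reset_cpu_window_stats() is an assumed helper, not defined in this patch.

```c
/*
 * Hypothetical caller: quiesce window-based load accounting on all
 * cpus, e.g. around a change of sched_ravg_window, then restart it
 * from a clean slate. Lockdep nesting annotations are omitted for
 * brevity.
 */
static void quiesce_window_stats(void)
{
	int cpu;

	local_irq_disable();
	for_each_possible_cpu(cpu)
		raw_spin_lock(&cpu_rq(cpu)->lock);

	/* All window-stats entry points now bail out early. */
	sched_disable_window_stats = 1;

	/*
	 * Assumed helper: clear curr/prev_runnable_sum and per-task
	 * window-contribution flags so accounting restarts from zero.
	 */
	for_each_possible_cpu(cpu)
		reset_cpu_window_stats(cpu_rq(cpu));

	sched_disable_window_stats = 0;

	for_each_possible_cpu(cpu)
		raw_spin_unlock(&cpu_rq(cpu)->lock);
	local_irq_enable();
}
```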