include/linux/sched.h +1 −1

@@ -832,7 +832,7 @@ struct task_struct {
 	struct list_head	grp_list;
 	u64			cpu_cycles;
 	bool			misfit;
-	u8			unfilter;
+	u32			unfilter;
 #endif
 
 #ifdef CONFIG_CGROUP_SCHED
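Note on the u8 -> u32 widening: taken together with the sysctl rename below, this reads as unfilter changing from a small window count to a time value, which cannot fit in 8 bits. A minimal userspace sketch of the truncation a u8 would cause (the 100 ms figure is an illustrative assumption, not taken from this diff):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t period_ns = 100000000u;	/* assumed 100 ms period, in ns */

	uint8_t as_u8 = (uint8_t)period_ns;	/* wraps modulo 256 */
	uint32_t as_u32 = period_ns;		/* value preserved */

	/* prints "u8: 0, u32: 100000000" -- 100000000 is an exact
	 * multiple of 256, so the u8 silently becomes zero. */
	printf("u8: %u, u32: %u\n", (unsigned)as_u8, as_u32);
	return 0;
}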
include/linux/sched/sysctl.h +1 −1

@@ -43,7 +43,7 @@ extern unsigned int __weak sysctl_sched_min_task_util_for_boost;
 extern unsigned int __weak sysctl_sched_min_task_util_for_colocation;
 extern unsigned int __weak sysctl_sched_asym_cap_sibling_freq_match_pct;
 extern unsigned int __weak sysctl_sched_coloc_downmigrate_ns;
-extern unsigned int __weak sysctl_sched_task_unfilter_nr_windows;
+extern unsigned int __weak sysctl_sched_task_unfilter_period;
 extern unsigned int __weak sysctl_sched_busy_hyst_enable_cpus;
 extern unsigned int __weak sysctl_sched_busy_hyst;
 extern unsigned int __weak sysctl_sched_coloc_busy_hyst_enable_cpus;
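The rename from _nr_windows to _period suggests the tunable's unit changed from "number of WALT windows" to a time period, presumably nanoseconds to match sysctl_sched_coloc_downmigrate_ns above. Assuming the companion walt.c change (not part of these hunks) now ages p->unfilter by the elapsed window size each accounting window, the bookkeeping would look roughly like this sketch; the helper name and the 20 ms window are assumptions:

#include <stdint.h>

/* Assumed semantics: unfilter holds the remaining unfilter period in ns
 * and loses one window's worth of time per accounting window. */
#define ASSUMED_RAVG_WINDOW_NS	20000000u	/* 20 ms, illustrative only */

static uint32_t age_unfilter(uint32_t unfilter, uint32_t win_size_ns)
{
	return (unfilter > win_size_ns) ? unfilter - win_size_ns : 0;
}

/* Under these assumptions a 100 ms period keeps a task unfiltered for
 * about five 20 ms windows: 100000000 -> 80000000 -> ... -> 0. */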
include/trace/events/sched.h +2 −2

@@ -1019,7 +1019,7 @@ TRACE_EVENT(sched_task_util,
 		__field(bool, is_rtg)
 		__field(bool, rtg_skip_min)
 		__field(int, start_cpu)
-		__field(int, unfilter)
+		__field(u32, unfilter)
 		__field(unsigned long, cpus_allowed)
 		__field(int, task_boost)
 	),

@@ -1054,7 +1054,7 @@ TRACE_EVENT(sched_task_util,
 #endif
 	),
 
-	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d candidates=%#lx best_energy_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d latency=%llu stune_boosted=%d is_rtg=%d rtg_skip_min=%d start_cpu=%d unfilter=%d affinity=%lx task_boost=%d",
+	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d candidates=%#lx best_energy_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d latency=%llu stune_boosted=%d is_rtg=%d rtg_skip_min=%d start_cpu=%d unfilter=%u affinity=%lx task_boost=%d",
 		__entry->pid, __entry->comm, __entry->util, __entry->prev_cpu,
 		__entry->candidates, __entry->best_energy_cpu, __entry->sync,
 		__entry->need_idle, __entry->fastpath, __entry->placement_boost,
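Switching the TP_printk() conversion from %d to %u is a correctness fix, not cosmetics: with the field now u32, any value above INT_MAX would render as a negative number under %d. Plain C illustration with a hypothetical value (the negative figure assumes the usual two's-complement representation):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t unfilter = 3000000000u;	/* hypothetical, > INT_MAX */

	printf("%%d -> %d\n", (int)unfilter);	/* -1294967296: misleading */
	printf("%%u -> %u\n", unfilter);	/* 3000000000: correct */
	return 0;
}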
kernel/sched/fair.c +32 −3

@@ -8870,6 +8870,11 @@ group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
 	if (sgs->sum_nr_running <= sgs->group_weight)
 		return false;
 
+#ifdef CONFIG_SCHED_WALT
+	if (env->idle != CPU_NOT_IDLE && walt_rotation_enabled)
+		return true;
+#endif
+
 	if ((sgs->group_capacity * 100) <
 			(sgs->group_util * env->sd->imbalance_pct))
 		return true;

@@ -11121,6 +11126,27 @@ static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle
 static inline void nohz_newidle_balance(struct rq *this_rq) { }
 #endif /* CONFIG_NO_HZ_COMMON */
 
+#ifdef CONFIG_SCHED_WALT
+static bool silver_has_big_tasks(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		if (!is_min_capacity_cpu(cpu))
+			break;
+		if (cpu_rq(cpu)->walt_stats.nr_big_tasks)
+			return true;
+	}
+
+	return false;
+}
+#else
+static inline bool silver_has_big_tasks(void)
+{
+	return false;
+}
+#endif
+
 /*
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.

@@ -11132,6 +11158,7 @@ int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 	struct sched_domain *sd;
 	int pulled_task = 0;
 	u64 curr_cost = 0;
+	u64 avg_idle = this_rq->avg_idle;
 
 	if (cpu_isolated(this_cpu))
 		return 0;

@@ -11148,7 +11175,9 @@ int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 	 */
 	if (!cpu_active(this_cpu))
 		return 0;
-
+	if (!is_min_capacity_cpu(this_cpu) && silver_has_big_tasks() &&
+			(atomic_read(&this_rq->nr_iowait) == 0))
+		avg_idle = ULLONG_MAX;
 	/*
 	 * This is OK, because current is on_cpu, which avoids it being picked
 	 * for load-balance and preemption/IRQs are still disabled avoiding

@@ -11157,7 +11186,7 @@ int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 	 */
 	rq_unpin_lock(this_rq, rf);
 
-	if (this_rq->avg_idle < sysctl_sched_migration_cost ||
+	if (avg_idle < sysctl_sched_migration_cost ||
 	    !READ_ONCE(this_rq->rd->overload)) {
 
 		rcu_read_lock();

@@ -11182,7 +11211,7 @@ int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 		if (!(sd->flags & SD_LOAD_BALANCE))
 			continue;
 
-		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
+		if (avg_idle < curr_cost + sd->max_newidle_lb_cost) {
 			update_next_balance(sd, &next_balance);
 			break;
 		}
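The newidle_balance() hunks are the heart of the change: when a non-minimum-capacity ("gold") CPU becomes newly idle, has no I/O waiters, and some minimum-capacity ("silver") CPU is carrying big tasks, the local copy of avg_idle is forced to ULLONG_MAX so that neither the sysctl_sched_migration_cost cutoff nor the per-domain max_newidle_lb_cost check can abort the balance before it pulls a big task up. A simplified standalone model of that decision (stand-in types; the real code walks struct rq under CONFIG_SCHED_WALT):

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for the fields newidle_balance() consults; illustration
 * only, not the kernel's struct rq. */
struct rq_model {
	uint64_t avg_idle;
	int nr_iowait;
	bool min_capacity;	/* "silver" CPU */
	int nr_big_tasks;	/* walt_stats.nr_big_tasks */
};

/* Mirrors silver_has_big_tasks(): the early break relies on the
 * min-capacity CPUs being enumerated before the bigger ones. */
static bool silver_has_big_tasks_model(const struct rq_model *rqs, int nr_cpus)
{
	for (int cpu = 0; cpu < nr_cpus; cpu++) {
		if (!rqs[cpu].min_capacity)
			break;
		if (rqs[cpu].nr_big_tasks)
			return true;
	}
	return false;
}

/* A newly idle big CPU with no pending I/O pretends it has unlimited
 * idle headroom, so the migration-cost cutoffs cannot veto pulling a
 * big task off a silver. */
static uint64_t effective_avg_idle(const struct rq_model *this_rq,
				   const struct rq_model *rqs, int nr_cpus)
{
	if (!this_rq->min_capacity && this_rq->nr_iowait == 0 &&
	    silver_has_big_tasks_model(rqs, nr_cpus))
		return UINT64_MAX;
	return this_rq->avg_idle;
}

The group_is_overloaded() hunk attacks the same problem from the balancer's other end: while walt_rotation_enabled is set, any idle-initiated balance treats the group as overloaded, so rotation-driven pulls are not rejected for looking under-loaded. Note that silver_has_big_tasks() (and the model above) assumes min-capacity CPUs are enumerated first, which holds on the big.LITTLE topologies this code targets.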
kernel/sched/walt.h +2 −0

@@ -13,6 +13,8 @@
 
 #define EXITING_TASK_MARKER	0xdeaddead
 
+extern unsigned int walt_rotation_enabled;
+
 extern void __weak walt_update_task_ravg(struct task_struct *p,
 				struct rq *rq, int event,
 				u64 wallclock, u64 irqtime);