diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
--- a/include/linux/sched/sysctl.h	(+1 −0)
+++ b/include/linux/sched/sysctl.h
@@ -29,6 +29,7 @@
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
+extern unsigned int sysctl_sched_force_lb_enable;
 #ifdef CONFIG_SCHED_WALT
 extern unsigned int __weak sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS];
 extern unsigned int __weak sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS];
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
--- a/kernel/sched/fair.c	(+2 −0)
+++ b/kernel/sched/fair.c
@@ -138,6 +138,7 @@
 unsigned int sysctl_walt_rtg_cfs_boost_prio = 99; /* disabled by default */
 unsigned int sysctl_walt_low_latency_task_threshold; /* disabled by default */
 #endif
 unsigned int sched_small_task_threshold = 102;
+__read_mostly unsigned int sysctl_sched_force_lb_enable = 1;

 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
@@ -11358,6 +11359,7 @@ int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 	bool prefer_spread = prefer_spread_on_idle(this_cpu, true);
 	bool force_lb = (!is_min_capacity_cpu(this_cpu) &&
 			 silver_has_big_tasks() &&
+			 sysctl_sched_force_lb_enable &&
 			 (atomic_read(&this_rq->nr_iowait) == 0));
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
--- a/kernel/sysctl.c	(+9 −0)
+++ b/kernel/sysctl.c
@@ -573,6 +573,15 @@ static struct ctl_table kern_table[] = {
 		.extra2		= &one_thousand,
 	},
 #endif
+	{
+		.procname	= "sched_force_lb_enable",
+		.data		= &sysctl_sched_force_lb_enable,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= SYSCTL_ONE,
+	},
 #ifdef CONFIG_SCHED_DEBUG
 	{
 		.procname	= "sched_min_granularity_ns",
include/linux/sched/sysctl.h (+1 −0):
@@ -29,6 +29,7 @@
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
+extern unsigned int sysctl_sched_force_lb_enable;
 #ifdef CONFIG_SCHED_WALT
 extern unsigned int __weak sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS];
 extern unsigned int __weak sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS];
kernel/sched/fair.c (+2 −0):
@@ -138,6 +138,7 @@
 unsigned int sysctl_walt_rtg_cfs_boost_prio = 99; /* disabled by default */
 unsigned int sysctl_walt_low_latency_task_threshold; /* disabled by default */
 #endif
 unsigned int sched_small_task_threshold = 102;
+__read_mostly unsigned int sysctl_sched_force_lb_enable = 1;

 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
@@ -11358,6 +11359,7 @@ int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 	bool prefer_spread = prefer_spread_on_idle(this_cpu, true);
 	bool force_lb = (!is_min_capacity_cpu(this_cpu) &&
 			 silver_has_big_tasks() &&
+			 sysctl_sched_force_lb_enable &&
 			 (atomic_read(&this_rq->nr_iowait) == 0));
kernel/sysctl.c (+9 −0):
@@ -573,6 +573,15 @@ static struct ctl_table kern_table[] = {
 		.extra2		= &one_thousand,
 	},
 #endif
+	{
+		.procname	= "sched_force_lb_enable",
+		.data		= &sysctl_sched_force_lb_enable,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= SYSCTL_ONE,
+	},
 #ifdef CONFIG_SCHED_DEBUG
 	{
 		.procname	= "sched_min_granularity_ns",