include/linux/sched/sysctl.h  +1 −0

@@ -62,6 +62,7 @@
 extern unsigned int sysctl_sched_small_task_pct;
 extern unsigned int sysctl_sched_upmigrate_pct;
 extern unsigned int sysctl_sched_downmigrate_pct;
 extern int sysctl_sched_upmigrate_min_nice;
+extern unsigned int sysctl_sched_prefer_idle;
 extern unsigned int sysctl_sched_powerband_limit_pct;
 extern unsigned int sysctl_sched_boost;
kernel/sched/fair.c  +53 −21

@@ -1337,6 +1337,13 @@ unsigned int __read_mostly sysctl_sched_downmigrate_pct = 60;
  */
 int __read_mostly sysctl_sched_upmigrate_min_nice = 15;
 
+/*
+ * Tunable to govern scheduler wakeup placement CPU selection
+ * preference. If set, the scheduler chooses to wake up a task
+ * on an idle CPU.
+ */
+unsigned int __read_mostly sysctl_sched_prefer_idle;
+
 /*
  * Scheduler boost is a mechanism to temporarily place tasks on CPUs
  * with higher capacity than those where a task would have normally

@@ -1766,9 +1773,10 @@ static int skip_cpu(struct task_struct *p, int cpu, int reason)
 /* return cheapest cpu that can fit this task */
 static int select_best_cpu(struct task_struct *p, int target, int reason)
 {
-        int i, best_cpu = -1, fallback_idle_cpu = -1;
+        int i, best_cpu = -1, fallback_idle_cpu = -1, min_cstate_cpu = -1;
         int prev_cpu = task_cpu(p);
         int cpu_cost, min_cost = INT_MAX;
+        int min_idle_cost = INT_MAX, min_busy_cost = INT_MAX;
         u64 load, min_load = ULLONG_MAX, min_fallback_load = ULLONG_MAX;
         int small_task = is_small_task(p);
         int boost = sched_boost();

@@ -1824,43 +1832,67 @@ static int select_best_cpu(struct task_struct *p, int target, int reason)
                  * overrides load and C-state.
                  */
                 if (power_delta_exceeded(cpu_cost, min_cost)) {
-                        if (cpu_cost < min_cost) {
-                                min_load = load;
+                        if (cpu_cost > min_cost)
+                                continue;
+
+                        min_cost = cpu_cost;
+                        min_load = ULLONG_MAX;
+                        min_cstate = INT_MAX;
+                        min_cstate_cpu = -1;
+                        best_cpu = -1;
                 }
 
+                /*
+                 * Partition CPUs based on whether they are completely idle
+                 * or not. For completely idle CPUs we choose the one in
+                 * the lowest C-state and then break ties with power cost
+                 */
                 if (idle_cpu(i)) {
                         if (cstate > min_cstate)
                                 continue;
 
                         if (cstate < min_cstate) {
+                                min_idle_cost = cpu_cost;
                                 min_cstate = cstate;
-                                best_cpu = i;
+                                min_cstate_cpu = i;
                                 continue;
                         }
 
+                        if (cpu_cost < min_idle_cost) {
+                                min_idle_cost = cpu_cost;
+                                min_cstate_cpu = i;
+                        }
                         continue;
                 }
 
-                /* After power band, load is prioritized next. */
+                /*
+                 * For CPUs that are not completely idle, pick one with the
+                 * lowest load and break ties with power cost
+                 */
+                if (load > min_load)
+                        continue;
+
                 if (load < min_load) {
                         min_load = load;
-                        min_cost = cpu_cost;
-                        min_cstate = cstate;
+                        min_busy_cost = cpu_cost;
                         best_cpu = i;
                         continue;
                 }
 
-                if (load > min_load)
-                        continue;
-
                 /*
                  * The load is equal to the previous selected CPU.
-                 * This will most often occur when deciding between
-                 * idle CPUs. Power cost is prioritized after load,
-                 * followed by cstate.
+                 * This is rare but when it does happen opt for the
+                 * more power efficient CPU option.
                  */
-                if (cpu_cost < min_cost) {
-                        min_cost = cpu_cost;
-                        min_cstate = cstate;
-                        best_cpu = i;
-                        continue;
-                }
-
-                if (cpu_cost == min_cost && cstate < min_cstate) {
-                        min_cstate = cstate;
+                if (cpu_cost < min_busy_cost) {
+                        min_busy_cost = cpu_cost;
                         best_cpu = i;
                 }
         }
 
+        if (min_cstate_cpu >= 0 &&
+            (sysctl_sched_prefer_idle ||
+             !(best_cpu >= 0 && mostly_idle_cpu(best_cpu))))
+                best_cpu = min_cstate_cpu;
+
 done:
         if (best_cpu < 0) {
                 if (unlikely(fallback_idle_cpu < 0))
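The resulting wakeup placement order is easier to follow outside the kernel sources. Below is a minimal, self-contained C sketch of the same decision order with the power-band reset omitted; struct cpu_stat, pick_cpu(), the prefer_idle flag and the mostly_idle field are illustrative stand-ins for the kernel's per-CPU state and its mostly_idle_cpu() check, not actual kernel APIs.

#include <limits.h>
#include <stdbool.h>

/* Hypothetical per-CPU snapshot; stands in for the kernel's internal state. */
struct cpu_stat {
        bool idle;                 /* completely idle, like idle_cpu(i) */
        bool mostly_idle;          /* below the mostly-idle load threshold */
        int cstate;                /* current C-state depth, 0 = running */
        unsigned long long load;   /* projected load if the task runs here */
        int power_cost;            /* estimated power cost of running here */
};

/*
 * Mirror of the placement policy in this patch:
 *  1. Completely idle CPUs: shallowest C-state, ties broken by power cost.
 *  2. Busy CPUs: lowest load, ties broken by power cost.
 *  3. Take the idle candidate when prefer_idle is set, or when the best
 *     busy candidate is not mostly idle.
 */
static int pick_cpu(const struct cpu_stat *cpus, int nr, bool prefer_idle)
{
        int best_busy = -1, best_idle = -1;
        int min_cstate = INT_MAX;
        int min_idle_cost = INT_MAX, min_busy_cost = INT_MAX;
        unsigned long long min_load = ~0ULL;

        for (int i = 0; i < nr; i++) {
                if (cpus[i].idle) {
                        if (cpus[i].cstate < min_cstate ||
                            (cpus[i].cstate == min_cstate &&
                             cpus[i].power_cost < min_idle_cost)) {
                                min_cstate = cpus[i].cstate;
                                min_idle_cost = cpus[i].power_cost;
                                best_idle = i;
                        }
                        continue;
                }

                if (cpus[i].load < min_load ||
                    (cpus[i].load == min_load &&
                     cpus[i].power_cost < min_busy_cost)) {
                        min_load = cpus[i].load;
                        min_busy_cost = cpus[i].power_cost;
                        best_busy = i;
                }
        }

        if (best_idle >= 0 &&
            (prefer_idle || !(best_busy >= 0 && cpus[best_busy].mostly_idle)))
                return best_idle;

        return best_busy;
}

With prefer_idle clear, the sketch falls back to the busy candidate only when that candidate is already mostly idle, which corresponds to the final min_cstate_cpu check added at the end of select_best_cpu().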
kernel/sysctl.c  +7 −0

@@ -416,6 +416,13 @@ static struct ctl_table kern_table[] = {
                 .mode           = 0644,
                 .proc_handler   = proc_dointvec,
         },
+        {
+                .procname       = "sched_prefer_idle",
+                .data           = &sysctl_sched_prefer_idle,
+                .maxlen         = sizeof(unsigned int),
+                .mode           = 0644,
+                .proc_handler   = proc_dointvec,
+        },
         {
                 .procname       = "sched_init_task_load",
                 .data           = &sysctl_sched_init_task_load_pct,
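Because the entry is registered in kern_table, the tunable is exposed as /proc/sys/kernel/sched_prefer_idle. A minimal user-space sketch for enabling it, assuming root privileges and a kernel carrying this patch:

#include <stdio.h>

/* Enable idle-CPU preference for wakeup placement (requires root). */
int main(void)
{
        FILE *f = fopen("/proc/sys/kernel/sched_prefer_idle", "w");

        if (!f) {
                perror("sched_prefer_idle");
                return 1;
        }
        fputs("1\n", f);
        fclose(f);
        return 0;
}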