Loading include/linux/sched/sysctl.h +6 −0 Original line number Diff line number Diff line Loading @@ -22,6 +22,8 @@ extern unsigned int sysctl_sched_is_big_little; extern unsigned int sysctl_sched_sync_hint_enable; extern unsigned int sysctl_sched_initial_task_util; extern unsigned int sysctl_sched_cstate_aware; extern unsigned int sysctl_sched_capacity_margin; extern unsigned int sysctl_sched_capacity_margin_down; #ifdef CONFIG_SCHED_WALT extern unsigned int sysctl_sched_use_walt_cpu_util; extern unsigned int sysctl_sched_use_walt_task_util; Loading Loading @@ -156,6 +158,10 @@ extern int sched_rt_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); extern int sched_updown_migrate_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); extern int sysctl_numa_balancing(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); Loading include/linux/sysctl.h +3 −0 Original line number Diff line number Diff line Loading @@ -59,6 +59,9 @@ extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, void __user *, size_t *, loff_t *); extern int proc_do_large_bitmap(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern int proc_douintvec_capacity(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); /* * Register a set of sysctl names by calling register_sysctl_table Loading kernel/sched/core.c +26 −0 Original line number Diff line number Diff line Loading @@ -9137,6 +9137,32 @@ int sched_rr_handler(struct ctl_table *table, int write, return ret; } #ifdef CONFIG_PROC_SYSCTL int sched_updown_migrate_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret; unsigned int *data = (unsigned int *)table->data; unsigned int old_val; static DEFINE_MUTEX(mutex); mutex_lock(&mutex); old_val = *data; ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos); if 
(!ret && write && sysctl_sched_capacity_margin > sysctl_sched_capacity_margin_down) { ret = -EINVAL; *data = old_val; } mutex_unlock(&mutex); return ret; } #endif #ifdef CONFIG_CGROUP_SCHED inline struct task_group *css_tg(struct cgroup_subsys_state *css) Loading kernel/sched/fair.c +10 −6 Original line number Diff line number Diff line Loading @@ -221,8 +221,8 @@ unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL; * The margin used when comparing utilization with CPU capacity: * util * 1024 < capacity * margin */ unsigned int capacity_margin = 1078; /* ~5% margin */ unsigned int capacity_margin_down = 1205; /* ~15% margin */ unsigned int sysctl_sched_capacity_margin = 1078; /* ~5% margin */ unsigned int sysctl_sched_capacity_margin_down = 1205; /* ~15% margin */ static inline void update_load_add(struct load_weight *lw, unsigned long inc) { Loading Loading @@ -5918,9 +5918,9 @@ static inline bool __task_fits(struct task_struct *p, int cpu, int util) util += boosted_task_util(p); if (capacity_orig_of(task_cpu(p)) > capacity_orig_of(cpu)) margin = capacity_margin_down; margin = sysctl_sched_capacity_margin_down; else margin = capacity_margin; margin = sysctl_sched_capacity_margin; return (capacity_orig_of(cpu) * 1024) > (util * margin); } Loading Loading @@ -5948,7 +5948,7 @@ static inline bool task_fits_spare(struct task_struct *p, int cpu) static bool __cpu_overutilized(int cpu, int delta) { return (capacity_orig_of(cpu) * 1024) < ((cpu_util(cpu) + delta) * capacity_margin); ((cpu_util(cpu) + delta) * sysctl_sched_capacity_margin); } bool cpu_overutilized(int cpu) Loading Loading @@ -6085,10 +6085,14 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, struct sched_group *fit_group = NULL, *spare_group = NULL; unsigned long min_load = ULONG_MAX, this_load = 0; unsigned long fit_capacity = ULONG_MAX; unsigned long max_spare_capacity = capacity_margin - SCHED_CAPACITY_SCALE; unsigned long max_spare_capacity; int load_idx = sd->forkexec_idx; int 
imbalance = 100 + (sd->imbalance_pct-100)/2; max_spare_capacity = sysctl_sched_capacity_margin - SCHED_CAPACITY_SCALE; if (sd_flag & SD_BALANCE_WAKE) load_idx = sd->wake_idx; Loading kernel/sysctl.c +52 −0 Original line number Diff line number Diff line Loading @@ -586,6 +586,20 @@ static struct ctl_table kern_table[] = { .extra1 = &min_wakeup_granularity_ns, .extra2 = &max_wakeup_granularity_ns, }, { .procname = "sched_upmigrate", .data = &sysctl_sched_capacity_margin, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sched_updown_migrate_handler, }, { .procname = "sched_downmigrate", .data = &sysctl_sched_capacity_margin_down, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sched_updown_migrate_handler, }, #ifdef CONFIG_SMP { .procname = "sched_tunable_scaling", Loading Loading @@ -3181,6 +3195,39 @@ int proc_do_large_bitmap(struct ctl_table *table, int write, } } static int do_proc_douintvec_capacity_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { if (write) { if (*negp) return -EINVAL; *valp = SCHED_FIXEDPOINT_SCALE * 100 / *lvalp; } else { *negp = false; *lvalp = SCHED_FIXEDPOINT_SCALE * 100 / *valp; } return 0; } /** * proc_douintvec_capacity - read a vector of integers in percentage and convert * into sched capacity * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Returns 0 on success. 
*/ int proc_douintvec_capacity(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table, write, buffer, lenp, ppos, do_proc_douintvec_capacity_conv, NULL); } #else /* CONFIG_PROC_SYSCTL */ int proc_dostring(struct ctl_table *table, int write, Loading Loading @@ -3238,6 +3285,11 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, return -ENOSYS; } int proc_douintvec_capacity(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } #endif /* CONFIG_PROC_SYSCTL */ Loading Loading
include/linux/sched/sysctl.h +6 −0 Original line number Diff line number Diff line Loading @@ -22,6 +22,8 @@ extern unsigned int sysctl_sched_is_big_little; extern unsigned int sysctl_sched_sync_hint_enable; extern unsigned int sysctl_sched_initial_task_util; extern unsigned int sysctl_sched_cstate_aware; extern unsigned int sysctl_sched_capacity_margin; extern unsigned int sysctl_sched_capacity_margin_down; #ifdef CONFIG_SCHED_WALT extern unsigned int sysctl_sched_use_walt_cpu_util; extern unsigned int sysctl_sched_use_walt_task_util; Loading Loading @@ -156,6 +158,10 @@ extern int sched_rt_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); extern int sched_updown_migrate_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); extern int sysctl_numa_balancing(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); Loading
/*
 * include/linux/sysctl.h — prototype for the percent <-> capacity
 * fixed-point conversion handler defined in kernel/sysctl.c.
 */
extern int proc_douintvec_capacity(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos);
#ifdef CONFIG_PROC_SYSCTL
/*
 * sched_updown_migrate_handler - sysctl handler shared by the
 * "sched_upmigrate" and "sched_downmigrate" tunables.
 *
 * Values are written as percentages but stored as capacity margins
 * (SCHED_FIXEDPOINT_SCALE * 100 / percent), so a *larger* stored
 * margin corresponds to a *smaller* percentage threshold.  After a
 * successful write, enforce that the up-migrate threshold stays at or
 * above the down-migrate threshold, i.e. margin <= margin_down; a
 * write that inverts the ordering is rolled back and rejected with
 * -EINVAL.  The function-local mutex serialises concurrent writers so
 * the cross-field check always sees a consistent pair of values.
 */
int sched_updown_migrate_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos)
{
	static DEFINE_MUTEX(mutex);
	unsigned int *data = (unsigned int *)table->data;
	unsigned int old_val;
	int ret;

	mutex_lock(&mutex);

	old_val = *data;
	ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);

	/* Reject writes that would invert the up/down migrate ordering. */
	if (!ret && write &&
	    sysctl_sched_capacity_margin > sysctl_sched_capacity_margin_down) {
		ret = -EINVAL;
		*data = old_val;
	}

	mutex_unlock(&mutex);

	return ret;
}
#endif /* CONFIG_PROC_SYSCTL */
kernel/sched/fair.c +10 −6 Original line number Diff line number Diff line Loading @@ -221,8 +221,8 @@ unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL; * The margin used when comparing utilization with CPU capacity: * util * 1024 < capacity * margin */ unsigned int capacity_margin = 1078; /* ~5% margin */ unsigned int capacity_margin_down = 1205; /* ~15% margin */ unsigned int sysctl_sched_capacity_margin = 1078; /* ~5% margin */ unsigned int sysctl_sched_capacity_margin_down = 1205; /* ~15% margin */ static inline void update_load_add(struct load_weight *lw, unsigned long inc) { Loading Loading @@ -5918,9 +5918,9 @@ static inline bool __task_fits(struct task_struct *p, int cpu, int util) util += boosted_task_util(p); if (capacity_orig_of(task_cpu(p)) > capacity_orig_of(cpu)) margin = capacity_margin_down; margin = sysctl_sched_capacity_margin_down; else margin = capacity_margin; margin = sysctl_sched_capacity_margin; return (capacity_orig_of(cpu) * 1024) > (util * margin); } Loading Loading @@ -5948,7 +5948,7 @@ static inline bool task_fits_spare(struct task_struct *p, int cpu) static bool __cpu_overutilized(int cpu, int delta) { return (capacity_orig_of(cpu) * 1024) < ((cpu_util(cpu) + delta) * capacity_margin); ((cpu_util(cpu) + delta) * sysctl_sched_capacity_margin); } bool cpu_overutilized(int cpu) Loading Loading @@ -6085,10 +6085,14 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, struct sched_group *fit_group = NULL, *spare_group = NULL; unsigned long min_load = ULONG_MAX, this_load = 0; unsigned long fit_capacity = ULONG_MAX; unsigned long max_spare_capacity = capacity_margin - SCHED_CAPACITY_SCALE; unsigned long max_spare_capacity; int load_idx = sd->forkexec_idx; int imbalance = 100 + (sd->imbalance_pct-100)/2; max_spare_capacity = sysctl_sched_capacity_margin - SCHED_CAPACITY_SCALE; if (sd_flag & SD_BALANCE_WAKE) load_idx = sd->wake_idx; Loading
kernel/sysctl.c +52 −0 Original line number Diff line number Diff line Loading @@ -586,6 +586,20 @@ static struct ctl_table kern_table[] = { .extra1 = &min_wakeup_granularity_ns, .extra2 = &max_wakeup_granularity_ns, }, { .procname = "sched_upmigrate", .data = &sysctl_sched_capacity_margin, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sched_updown_migrate_handler, }, { .procname = "sched_downmigrate", .data = &sysctl_sched_capacity_margin_down, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sched_updown_migrate_handler, }, #ifdef CONFIG_SMP { .procname = "sched_tunable_scaling", Loading Loading @@ -3181,6 +3195,39 @@ int proc_do_large_bitmap(struct ctl_table *table, int write, } } static int do_proc_douintvec_capacity_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { if (write) { if (*negp) return -EINVAL; *valp = SCHED_FIXEDPOINT_SCALE * 100 / *lvalp; } else { *negp = false; *lvalp = SCHED_FIXEDPOINT_SCALE * 100 / *valp; } return 0; } /** * proc_douintvec_capacity - read a vector of integers in percentage and convert * into sched capacity * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Returns 0 on success. */ int proc_douintvec_capacity(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table, write, buffer, lenp, ppos, do_proc_douintvec_capacity_conv, NULL); } #else /* CONFIG_PROC_SYSCTL */ int proc_dostring(struct ctl_table *table, int write, Loading Loading @@ -3238,6 +3285,11 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, return -ENOSYS; } int proc_douintvec_capacity(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } #endif /* CONFIG_PROC_SYSCTL */ Loading