include/linux/sched/sysctl.h  +6 −0

@@ -34,6 +34,8 @@ extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS];
 extern unsigned int sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS];
 #ifdef CONFIG_SCHED_WALT
+extern unsigned int sysctl_sched_user_hint;
+extern const int sched_user_hint_max;
 extern unsigned int sysctl_sched_cpu_high_irqload;
 extern unsigned int sysctl_sched_boost;
 extern unsigned int sysctl_sched_group_upmigrate_pct;
@@ -49,6 +51,10 @@
 extern int walt_proc_group_thresholds_handler(struct ctl_table *table,
					int write, void __user *buffer,
					size_t *lenp,
					loff_t *ppos);
+extern int walt_proc_user_hint_handler(struct ctl_table *table,
+					int write, void __user *buffer,
+					size_t *lenp,
+					loff_t *ppos);

 #endif
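The new sysctl_sched_user_hint knob gets a dedicated handler rather than a generic proc_dointvec so the write path can be validated against sched_user_hint_max and a change in the hint can be acted on. The body of walt_proc_user_hint_handler is not part of this diff (it would live in kernel/sched/walt.c); the following is only a minimal sketch under the assumption that the ctl_table entry's extra1/extra2 bounds are 0 and sched_user_hint_max and that range checking is delegated to proc_dointvec_minmax.

	/*
	 * Sketch only -- the real walt_proc_user_hint_handler is not shown in
	 * this diff. Assumptions: the ctl_table entry uses extra1 = 0 and
	 * extra2 = &sched_user_hint_max, and a changed hint should be made
	 * visible to the frequency-governor path.
	 */
	#include <linux/sysctl.h>
	#include <linux/mutex.h>
	#include <linux/sched/sysctl.h>

	int walt_proc_user_hint_handler(struct ctl_table *table, int write,
					void __user *buffer, size_t *lenp,
					loff_t *ppos)
	{
		static DEFINE_MUTEX(hint_mutex);	/* serialize writers */
		unsigned int old_value;
		int ret;

		mutex_lock(&hint_mutex);

		old_value = sysctl_sched_user_hint;
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (ret || !write || old_value == sysctl_sched_user_hint)
			goto unlock;

		/*
		 * Assumed behaviour: when the hint actually changed, notify
		 * the governor (e.g. kick a cpufreq update) so the new value
		 * takes effect without waiting for the next window rollover.
		 */
	unlock:
		mutex_unlock(&hint_mutex);
		return ret;
	}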
include/trace/events/walt.h  +7 −4

@@ -491,9 +491,10 @@ TRACE_EVENT(sched_load_to_gov,
 	TP_PROTO(struct rq *rq, u64 aggr_grp_load, u32 tt_load,
		int freq_aggr, u64 load, int policy,
-		int big_task_rotation),
+		int big_task_rotation, unsigned int user_hint),
 	TP_ARGS(rq, aggr_grp_load, tt_load, freq_aggr, load, policy,
-		big_task_rotation),
+		big_task_rotation, user_hint),

 	TP_STRUCT__entry(
		__field(int, cpu)
@@ -509,6 +510,7 @@ TRACE_EVENT(sched_load_to_gov,
		__field(u64, pl)
		__field(u64, load)
		__field(int, big_task_rotation)
+		__field(unsigned int, user_hint)
 	),

 	TP_fast_assign(
@@ -526,13 +528,14 @@ TRACE_EVENT(sched_load_to_gov,
			rq->walt_stats.pred_demands_sum_scaled;
		__entry->load = load;
		__entry->big_task_rotation = big_task_rotation;
+		__entry->user_hint = user_hint;
 	),

-	TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr=%d tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d",
+	TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr=%d tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d user_hint=%u",
		__entry->cpu, __entry->policy, __entry->ed_task_pid,
		__entry->aggr_grp_load, __entry->freq_aggr,
		__entry->tt_load, __entry->rq_ps, __entry->grp_rq_ps,
		__entry->nt_ps, __entry->grp_nt_ps, __entry->pl,
-		__entry->load, __entry->big_task_rotation)
+		__entry->load, __entry->big_task_rotation, __entry->user_hint)
 );
 #endif
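With the extra tracepoint argument, the caller of trace_sched_load_to_gov() has to forward the hint as well. The call site is not part of this diff; a plausible sketch, assuming the event is emitted from the WALT path that reports load to the governor and that the local variable names shown here are illustrative, would be:

	/* Hypothetical updated call site (assumed to be in kernel/sched/walt.c,
	 * not shown in this diff): the current user hint is simply forwarded
	 * so it can be correlated with the reported load in the trace. */
	trace_sched_load_to_gov(rq, aggr_grp_load, tt_load, freq_aggr,
				load, reporting_policy, big_task_rotation,
				sysctl_sched_user_hint);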
kernel/sched/core.c  +2 −2

@@ -2008,7 +2008,7 @@ static inline void walt_try_to_wake_up(struct task_struct *p)
 	rcu_read_lock();
 	grp = task_related_thread_group(p);
-	if (update_preferred_cluster(grp, p, old_load))
+	if (update_preferred_cluster(grp, p, old_load, false))
		set_preferred_cluster(grp);
 	rcu_read_unlock();
 }
@@ -3203,7 +3203,7 @@ void scheduler_tick(void)
 	rcu_read_lock();
 	grp = task_related_thread_group(curr);
-	if (update_preferred_cluster(grp, curr, old_load))
+	if (update_preferred_cluster(grp, curr, old_load, true))
		set_preferred_cluster(grp);
 	rcu_read_unlock();
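The new from_tick flag lets update_preferred_cluster() tell the periodic path (true, from scheduler_tick()) apart from the wakeup path (false, from walt_try_to_wake_up()). How the flag is consumed is not visible in this diff; a minimal sketch, assuming the intent is to force a preferred-cluster re-evaluation on every tick while the user hint is at its maximum (is_suh_max() is a hypothetical helper standing for "sysctl_sched_user_hint == sched_user_hint_max"), could look like:

	/*
	 * Sketch only -- the real update_preferred_cluster() lives in
	 * kernel/sched/walt.c and is not shown here.
	 */
	int update_preferred_cluster(struct related_thread_group *grp,
				     struct task_struct *p, u32 old_load,
				     bool from_tick)
	{
		u32 new_load = task_load(p);

		if (!grp)
			return 0;

		/* At max user hint, re-evaluate on every tick regardless of load. */
		if (unlikely(from_tick && is_suh_max()))
			return 1;

		/*
		 * Otherwise keep the pre-existing policy: update only when the
		 * task's load has changed significantly since the last check.
		 */
		if (abs(new_load - old_load) > sched_ravg_window / 4)
			return 1;

		return 0;
	}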
kernel/sched/fair.c  +2 −1

@@ -3912,7 +3912,8 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
 	if (is_min_capacity_cpu(cpu)) {
		if (task_boost_policy(p) == SCHED_BOOST_ON_BIG ||
			task_boost > 0 ||
-			schedtune_task_boost(p) > 0)
+			schedtune_task_boost(p) > 0 ||
+			walt_should_kick_upmigrate(p, cpu))
			return false;
 	} else { /* mid cap cpu */
		if (task_boost > 1)
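walt_should_kick_upmigrate() is defined elsewhere in the WALT code and is not part of this diff. Judging from its use here, it returns true when a task running on a min-capacity CPU should be reported as not fitting, so placement logic migrates it up. A hedged sketch, assuming the trigger is the user hint being at its maximum for tasks that belong to a related thread group (is_suh_max() is the same hypothetical helper as in the previous sketch), might be:

	/*
	 * Sketch only -- not the actual implementation. Assumption: with the
	 * user hint maxed out, tasks in a related thread group should not be
	 * treated as fitting a min-capacity CPU, so task_fits_max() fails and
	 * the task is up-migrated.
	 */
	static inline bool walt_should_kick_upmigrate(struct task_struct *p, int cpu)
	{
		struct related_thread_group *rtg = task_related_thread_group(p);

		if (is_suh_max() && rtg && is_min_capacity_cpu(cpu))
			return true;

		return false;
	}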
kernel/sched/sched.h  +2 −2

@@ -2680,7 +2680,7 @@ extern unsigned int __read_mostly sched_load_granule;
 extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
 extern int update_preferred_cluster(struct related_thread_group *grp,
-		struct task_struct *p, u32 old_load);
+		struct task_struct *p, u32 old_load, bool from_tick);
 extern void set_preferred_cluster(struct related_thread_group *grp);
 extern void add_new_task_to_grp(struct task_struct *new);
@@ -2995,7 +2995,7 @@ static inline u32 task_load(struct task_struct *p) { return 0; }
 static inline u32 task_pl(struct task_struct *p) { return 0; }

 static inline int update_preferred_cluster(struct related_thread_group *grp,
-		struct task_struct *p, u32 old_load)
+		struct task_struct *p, u32 old_load, bool from_tick)
 {
	return 0;
 }