include/trace/events/sched.h  +6 −4

@@ -713,10 +713,10 @@ TRACE_EVENT(sched_energy_diff,
 TRACE_EVENT(sched_task_util,
 
 	TP_PROTO(struct task_struct *p, int next_cpu, int backup_cpu,
-		 int target_cpu, bool sync, bool need_idle,
+		 int target_cpu, bool sync, bool need_idle, int fastpath,
 		 bool placement_boost, int rtg_cpu, u64 start_t),
 
-	TP_ARGS(p, next_cpu, backup_cpu, target_cpu, sync, need_idle,
+	TP_ARGS(p, next_cpu, backup_cpu, target_cpu, sync, need_idle, fastpath,
 		placement_boost, rtg_cpu, start_t),
 
 	TP_STRUCT__entry(
@@ -729,6 +729,7 @@ TRACE_EVENT(sched_task_util,
 		__field(int, target_cpu			)
 		__field(bool, sync			)
 		__field(bool, need_idle			)
+		__field(int, fastpath			)
 		__field(bool, placement_boost		)
 		__field(int, rtg_cpu			)
 		__field(u64, latency			)
@@ -744,13 +745,14 @@ TRACE_EVENT(sched_task_util,
 		__entry->target_cpu		= target_cpu;
 		__entry->sync			= sync;
 		__entry->need_idle		= need_idle;
+		__entry->fastpath		= fastpath;
 		__entry->placement_boost	= placement_boost;
 		__entry->rtg_cpu		= rtg_cpu;
 		__entry->latency		= (sched_clock() - start_t);
 	),
 
-	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d next_cpu=%d backup_cpu=%d target_cpu=%d sync=%d need_idle=%d placement_boost=%d rtg_cpu=%d latency=%llu",
-		__entry->pid, __entry->comm, __entry->util, __entry->prev_cpu, __entry->next_cpu, __entry->backup_cpu, __entry->target_cpu, __entry->sync, __entry->need_idle, __entry->placement_boost, __entry->rtg_cpu, __entry->latency)
+	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d next_cpu=%d backup_cpu=%d target_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d rtg_cpu=%d latency=%llu",
+		__entry->pid, __entry->comm, __entry->util, __entry->prev_cpu, __entry->next_cpu, __entry->backup_cpu, __entry->target_cpu, __entry->sync, __entry->need_idle, __entry->fastpath, __entry->placement_boost, __entry->rtg_cpu, __entry->latency)
 );
 
 #endif

kernel/sched/fair.c  +20 −7

@@ -7357,6 +7357,12 @@ static inline struct cpumask *find_rtg_target(struct task_struct *p)
 }
 #endif
 
+enum fastpaths {
+	NONE = 0,
+	SYNC_WAKEUP,
+	PREV_CPU_BIAS,
+};
+
 static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync)
 {
 	bool boosted, prefer_idle;
@@ -7367,6 +7373,7 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
 	struct cpumask *rtg_target = find_rtg_target(p);
 	struct find_best_target_env fbt_env;
 	u64 start_t = 0;
+	int fastpath = 0;
 
 	if (trace_sched_task_util_enabled())
 		start_t = sched_clock();
@@ -7403,12 +7410,17 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
 		if (bias_to_waker_cpu(p, cpu, rtg_target)) {
 			schedstat_inc(p->se.statistics.nr_wakeups_secb_sync);
 			schedstat_inc(this_rq()->eas_stats.secb_sync);
-			return cpu;
+			target_cpu = cpu;
+			fastpath = SYNC_WAKEUP;
+			goto out;
 		}
 	}
 
-	if (bias_to_prev_cpu(p, rtg_target))
-		return prev_cpu;
+	if (bias_to_prev_cpu(p, rtg_target)) {
+		target_cpu = prev_cpu;
+		fastpath = PREV_CPU_BIAS;
+		goto out;
+	}
 
 	rcu_read_lock();
@@ -7495,11 +7507,12 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
 	schedstat_inc(this_rq()->eas_stats.secb_count);
 
 unlock:
-	trace_sched_task_util(p, next_cpu, backup_cpu, target_cpu,
-			      sync, fbt_env.need_idle,
-			      fbt_env.placement_boost, rtg_target ?
-			      cpumask_first(rtg_target) : -1, start_t);
 	rcu_read_unlock();
+out:
+	trace_sched_task_util(p, next_cpu, backup_cpu, target_cpu,
+			      sync, fbt_env.need_idle, fastpath,
+			      fbt_env.placement_boost, rtg_target ?
+			      cpumask_first(rtg_target) : -1, start_t);
 
 	return target_cpu;
 }
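Taken together, the two diffs thread a fastpath id through the wakeup path: the two biases that used to return early, and so never reached the tracepoint, now record which shortcut fired and funnel through a single exit where trace_sched_task_util() is emitted. Below is a minimal userspace sketch of that single-exit pattern, not the kernel code: bias_to_waker(), bias_to_prev(), full_search(), and the printf() standing in for the tracepoint are all hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the patch's enum; 0 means the full search ran. */
enum fastpaths { NONE = 0, SYNC_WAKEUP, PREV_CPU_BIAS };

/* Hypothetical stand-ins for the kernel's bias checks and search. */
static bool bias_to_waker(int cpu)   { return cpu == 2; }
static bool bias_to_prev(int cpu)    { return cpu == 1; }
static int  full_search(int prev_cpu) { return prev_cpu ^ 1; }

static int pick_cpu(int prev_cpu, int waker_cpu, bool sync)
{
        int fastpath = NONE;
        int target_cpu;

        if (sync && bias_to_waker(waker_cpu)) {
                target_cpu = waker_cpu;
                fastpath = SYNC_WAKEUP;
                goto out;               /* was: return cpu; left untraced */
        }

        if (bias_to_prev(prev_cpu)) {
                target_cpu = prev_cpu;
                fastpath = PREV_CPU_BIAS;
                goto out;               /* was: return prev_cpu; left untraced */
        }

        target_cpu = full_search(prev_cpu);     /* slow path */
out:
        /* Single exit: every path is traced, tagged with its fastpath. */
        printf("target_cpu=%d fastpath=%d\n", target_cpu, fastpath);
        return target_cpu;
}

int main(void)
{
        pick_cpu(1, 2, true);   /* sync-wakeup fastpath */
        pick_cpu(1, 3, false);  /* prev-CPU fastpath */
        pick_cpu(4, 3, false);  /* full search, fastpath=0 */
        return 0;
}

Because start_t is sampled on entry (and only when the event is enabled, per the trace_sched_task_util_enabled() check), the fastpath exits now also appear in the latency field instead of vanishing from the trace.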
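TP_printk emits the new field as a bare integer (fastpath=%d). When post-processing captured events, the values map back onto enum fastpaths; a hypothetical decoder for such a tool, not part of the patch:

/* Hypothetical decoder for the integer fastpath field emitted by
 * sched_task_util; values follow enum fastpaths in the patch above. */
static const char *fastpath_name(int fastpath)
{
        switch (fastpath) {
        case 0:  return "none";          /* full energy-aware search ran */
        case 1:  return "sync_wakeup";   /* biased to the waker's CPU */
        case 2:  return "prev_cpu_bias"; /* kept on the previous CPU */
        default: return "unknown";
        }
}

A fastpath=0 record therefore marks a wakeup where select_energy_cpu_brute() fell through to the full rcu-protected search.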