include/trace/events/sched.h (+63 −0)

@@ -986,6 +986,69 @@ TRACE_EVENT(sched_find_best_target,
 		  __entry->target)
 );
 
+/*
+ * Tracepoint for tasks' estimated utilization.
+ */
+TRACE_EVENT(sched_util_est_task,
+
+	TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),
+
+	TP_ARGS(tsk, avg),
+
+	TP_STRUCT__entry(
+		__array( char,		comm,	TASK_COMM_LEN	)
+		__field( pid_t,		pid			)
+		__field( int,		cpu			)
+		__field( unsigned int,	util_avg		)
+		__field( unsigned int,	est_enqueued		)
+		__field( unsigned int,	est_ewma		)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid		= tsk->pid;
+		__entry->cpu		= task_cpu(tsk);
+		__entry->util_avg	= avg->util_avg;
+		__entry->est_enqueued	= avg->util_est.enqueued;
+		__entry->est_ewma	= avg->util_est.ewma;
+	),
+
+	TP_printk("comm=%s pid=%d cpu=%d util_avg=%u util_est_ewma=%u util_est_enqueued=%u",
+		  __entry->comm,
+		  __entry->pid,
+		  __entry->cpu,
+		  __entry->util_avg,
+		  __entry->est_ewma,
+		  __entry->est_enqueued)
+);
+
+/*
+ * Tracepoint for root cfs_rq's estimated utilization.
+ */
+TRACE_EVENT(sched_util_est_cpu,
+
+	TP_PROTO(int cpu, struct cfs_rq *cfs_rq),
+
+	TP_ARGS(cpu, cfs_rq),
+
+	TP_STRUCT__entry(
+		__field( int,		cpu			)
+		__field( unsigned int,	util_avg		)
+		__field( unsigned int,	util_est_enqueued	)
+	),
+
+	TP_fast_assign(
+		__entry->cpu			= cpu;
+		__entry->util_avg		= cfs_rq->avg.util_avg;
+		__entry->util_est_enqueued	= cfs_rq->avg.util_est.enqueued;
+	),
+
+	TP_printk("cpu=%d util_avg=%u util_est_enqueued=%u",
+		  __entry->cpu,
+		  __entry->util_avg,
+		  __entry->util_est_enqueued)
+);
+
 #ifdef CONFIG_SCHED_WALT
 struct rq;
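For quick inspection, the TP_printk() formats above can be parsed straight out of the trace buffer. Below is a minimal userspace sketch, not part of this patch, that converts sched_util_est_task lines read on stdin into CSV for plotting. The field names come from the tracepoint definition above; everything else (the program itself, the tracefs path in the usage note) is an assumption about the target system.

/*
 * Hypothetical helper (not part of this patch): parse sched_util_est_task
 * lines, as formatted by TP_printk() above, from stdin and emit CSV.
 * Filtering on "comm=" skips sched_util_est_cpu lines, which lack it.
 * Note: a comm containing spaces will not parse; good enough for plots.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512], comm[64];
	int pid, cpu;
	unsigned int util_avg, est_ewma, est_enqueued;

	puts("comm,pid,cpu,util_avg,util_est_ewma,util_est_enqueued");
	while (fgets(line, sizeof(line), stdin)) {
		char *p = strstr(line, "comm=");

		if (!p)
			continue;
		if (sscanf(p, "comm=%63s pid=%d cpu=%d util_avg=%u"
			   " util_est_ewma=%u util_est_enqueued=%u",
			   comm, &pid, &cpu, &util_avg,
			   &est_ewma, &est_enqueued) == 6)
			printf("%s,%d,%d,%u,%u,%u\n", comm, pid, cpu,
			       util_avg, est_ewma, est_enqueued);
	}
	return 0;
}

Usage, assuming tracefs is mounted at the usual debugfs location: enable the event via events/sched/sched_util_est_task/enable under /sys/kernel/debug/tracing/, then pipe trace_pipe into the program.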
kernel/sched/fair.c (+29 −0)

@@ -3094,6 +3094,28 @@ __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entit
 			  se->on_rq * scale_load_down(se->load.weight),
 			  cfs_rq->curr == se, NULL, NULL)) {
 		cfs_se_util_change(&se->avg);
+
+#ifdef UTIL_EST_DEBUG
+		/*
+		 * Trace utilization only for actual tasks.
+		 *
+		 * These trace events are mostly useful for producing
+		 * easier-to-read plots of the estimated utilization, which
+		 * can then be compared against the growth/decay of the
+		 * original PELT signal.
+		 * Keep them disabled by default in "production kernels".
+		 */
+		if (entity_is_task(se)) {
+			struct task_struct *tsk = task_of(se);
+
+			trace_sched_util_est_task(tsk, &se->avg);
+
+			/* Trace utilization only for the top level CFS RQ */
+			cfs_rq = &(task_rq(tsk)->cfs);
+			trace_sched_util_est_cpu(cpu, cfs_rq);
+		}
+#endif /* UTIL_EST_DEBUG */
+
 		return 1;
 	}

@@ -3686,6 +3708,9 @@ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
 	enqueued  = cfs_rq->avg.util_est.enqueued;
 	enqueued += (_task_util_est(p) | UTIL_AVG_UNCHANGED);
 	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
+
+	trace_sched_util_est_task(p, &p->se.avg);
+	trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
 }

@@ -3724,6 +3749,8 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
 	}
 	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
 
+	trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
+
 	/*
 	 * Skip update of task's estimated utilization when the task has not
 	 * yet completed an activation, e.g. being migrated.

@@ -3769,6 +3796,8 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
 	ue.ewma  += last_ewma_diff;
 	ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
 	WRITE_ONCE(p->se.avg.util_est, ue);
+
+	trace_sched_util_est_task(p, &p->se.avg);
 }
 
 #else /* CONFIG_SMP */
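The last hunk is the interesting one for the sched_util_est_task event: by the time the tracepoint fires, ue.ewma has been updated in fixed point as ewma += (enqueued - ewma) / 2^UTIL_EST_WEIGHT_SHIFT. A standalone sketch of that arithmetic follows, assuming UTIL_EST_WEIGHT_SHIFT == 2 (the value used by the util_est series); the initial "ue.ewma <<= UTIL_EST_WEIGHT_SHIFT" scaling step is not visible in the hunk above but precedes the "+=" shown there.

/*
 * Standalone sketch of the EWMA update that util_est_dequeue() performs
 * just before trace_sched_util_est_task() is called.
 */
#include <stdio.h>

#define UTIL_EST_WEIGHT_SHIFT	2

static unsigned int ewma_update(unsigned int ewma, unsigned int enqueued)
{
	long last_ewma_diff = (long)enqueued - ewma;

	/*
	 * Fixed-point update: ewma += (enqueued - ewma) / 4, i.e. each
	 * dequeue moves the estimate a quarter of the way toward the
	 * utilization of the activation that just ended.
	 */
	ewma <<= UTIL_EST_WEIGHT_SHIFT;
	ewma  += last_ewma_diff;
	ewma >>= UTIL_EST_WEIGHT_SHIFT;
	return ewma;
}

int main(void)
{
	unsigned int ewma = 100;

	/* A task repeatedly dequeued with enqueued=400 converges upward:
	 * 175, 231, 273, ... */
	for (int i = 0; i < 6; i++) {
		ewma = ewma_update(ewma, 400);
		printf("dequeue %d: ewma=%u\n", i + 1, ewma);
	}
	return 0;
}

The shift-based form avoids a division and makes the trace output easy to interpret: util_est_ewma closes a quarter of its gap to util_est_enqueued per dequeue, so it decays much more slowly than the raw PELT util_avg signal traced alongside it.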