Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bb343c15 authored by Patrick Bellasi, committed by Gerrit - the friendly Code Review server
Browse files

ANDROID: sched/events: Introduce util_est trace events



Signed-off-by: Patrick Bellasi <patrick.bellasi@arm.com>
Change-Id: I359f7ffbd62e86a16a96d7f02da38e9ff260fd99
Git-commit: a9d8b29e
Git-repo: https://android.googlesource.com/kernel/common/


[satyap@codeaurora.org: trivial merge conflict resolution in
include/trace/events/sched.h]
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
parent 44d3dca2
Loading
Loading
Loading
Loading
+63 −0
Original line number Diff line number Diff line
@@ -1182,6 +1182,69 @@ TRACE_EVENT(sched_find_best_target,
		__entry->backup_cpu)
);

/*
 * Tracepoint for tasks' estimated utilization.
 *
 * Emitted when a task's PELT/util_est signals are updated; reports the
 * instantaneous PELT util_avg alongside the two util_est components
 * (enqueued and ewma) taken from the supplied &struct sched_avg.
 *
 * NOTE: the field names and TP_printk() format below are userspace-visible
 * tracepoint ABI — do not rename or reorder them.
 */
TRACE_EVENT(sched_util_est_task,

	TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),

	TP_ARGS(tsk, avg),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN		)
		__field( pid_t,		pid			)
		__field( int,		cpu			)
		__field( unsigned int,	util_avg		)
		__field( unsigned int,	est_enqueued		)
		__field( unsigned int,	est_ewma		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid			= tsk->pid;
		__entry->cpu                    = task_cpu(tsk);
		__entry->util_avg               = avg->util_avg;
		__entry->est_enqueued           = avg->util_est.enqueued;
		__entry->est_ewma               = avg->util_est.ewma;
	),

	/* ewma is intentionally printed before enqueued in the output line */
	TP_printk("comm=%s pid=%d cpu=%d util_avg=%u util_est_ewma=%u util_est_enqueued=%u",
		  __entry->comm,
		  __entry->pid,
		  __entry->cpu,
		  __entry->util_avg,
		  __entry->est_ewma,
		  __entry->est_enqueued)
);

/*
 * Tracepoint for root cfs_rq's estimated utilization.
 *
 * Emitted when a CPU's root cfs_rq util_est changes; reports the PELT
 * util_avg and the aggregated util_est.enqueued of the given @cfs_rq.
 * Callers in this patch pass the top-level (root) cfs_rq of @cpu —
 * presumably that is the intended contract for all future call sites.
 *
 * NOTE: the field names and TP_printk() format below are userspace-visible
 * tracepoint ABI — do not rename or reorder them.
 */
TRACE_EVENT(sched_util_est_cpu,

	TP_PROTO(int cpu, struct cfs_rq *cfs_rq),

	TP_ARGS(cpu, cfs_rq),

	TP_STRUCT__entry(
		__field( int,		cpu			)
		__field( unsigned int,	util_avg		)
		__field( unsigned int,	util_est_enqueued	)
	),

	TP_fast_assign(
		__entry->cpu			= cpu;
		__entry->util_avg		= cfs_rq->avg.util_avg;
		__entry->util_est_enqueued	= cfs_rq->avg.util_est.enqueued;
	),

	TP_printk("cpu=%d util_avg=%u util_est_enqueued=%u",
		  __entry->cpu,
		  __entry->util_avg,
		  __entry->util_est_enqueued)
);

TRACE_EVENT(sched_cpu_util,

	TP_PROTO(int cpu),
+29 −0
Original line number Diff line number Diff line
@@ -3145,6 +3145,28 @@ __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entit
			       se->on_rq * scale_load_down(se->load.weight),
			       cfs_rq->curr == se, NULL, NULL)) {
		cfs_se_util_change(&se->avg);

#ifdef UTIL_EST_DEBUG
		/*
		 * Trace utilization only for actual tasks.
		 *
		 * These trace events are mostly useful to get easier to
		 * read plots for the estimated utilization, where we can
		 * compare it with the actual grow/decrease of the original
		 * PELT signal.
		 * Let's keep them disabled by default in "production kernels".
		 */
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			trace_sched_util_est_task(tsk, &se->avg);

			/* Trace utilization only for top level CFS RQ */
			cfs_rq = &(task_rq(tsk)->cfs);
			trace_sched_util_est_cpu(cpu, cfs_rq);
		}
#endif /* UTIL_EST_DEBUG */

		return 1;
	}

@@ -3722,6 +3744,9 @@ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
	enqueued  = cfs_rq->avg.util_est.enqueued;
	enqueued += (_task_util_est(p) | UTIL_AVG_UNCHANGED);
	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);

	trace_sched_util_est_task(p, &p->se.avg);
	trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
}

/*
@@ -3760,6 +3785,8 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
	}
	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);

	trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);

	/*
	 * Skip update of task's estimated utilization when the task has not
	 * yet completed an activation, e.g. being migrated.
@@ -3805,6 +3832,8 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
	ue.ewma  += last_ewma_diff;
	ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
	WRITE_ONCE(p->se.avg.util_est, ue);

	trace_sched_util_est_task(p, &p->se.avg);
}

#else /* CONFIG_SMP */