
Commit b540360a authored by Dietmar Eggemann, committed by Quentin Perret

ANDROID: sched/events: Introduce cfs_rq load tracking trace event



The trace event keys are mapped as follows:

 (1) load     : cfs_rq->avg.load_avg

 (2) rbl_load : cfs_rq->avg.runnable_load_avg

 (3) util     : cfs_rq->avg.util_avg

To let this trace event work for configurations with and without group
scheduling support for cfs (CONFIG_FAIR_GROUP_SCHED), the following
special handling is necessary for a non-existent key=value pair:

 path = "(null)" : In case of !CONFIG_FAIR_GROUP_SCHED.

The following list shows examples of the key=value pairs in different
configurations for:

 (1) a root task_group:

     cpu=4 path=/ load=6 rbl_load=6 util=331

 (2) a task_group:

     cpu=1 path=/tg1/tg11/tg111 load=538 rbl_load=538 util=522

 (3) an autogroup:

     cpu=3 path=/autogroup-18 load=997 rbl_load=997 util=517

 (4) w/o CONFIG_FAIR_GROUP_SCHED:

     cpu=0 path=(null) load=314 rbl_load=314 util=289

The trace event is only defined for CONFIG_SMP.
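
Output like the examples above can be captured from user space once the
event is enabled through tracefs. A minimal sketch (not part of this
commit), assuming the usual /sys/kernel/debug/tracing mount point; the
event path follows from TRACE_SYSTEM sched and the event name
sched_load_cfs_rq:

#include <stdio.h>

int main(void)
{
	/* Enable the new trace event (path assumes the usual tracefs mount). */
	FILE *enable = fopen("/sys/kernel/debug/tracing/events/sched/"
			     "sched_load_cfs_rq/enable", "w");
	if (!enable) {
		perror("enable");
		return 1;
	}
	fputs("1\n", enable);
	fclose(enable);

	/*
	 * Stream the trace; each record ends with the TP_printk() output,
	 * e.g. "sched_load_cfs_rq: cpu=4 path=/ load=6 rbl_load=6 util=331".
	 */
	FILE *trace = fopen("/sys/kernel/debug/tracing/trace_pipe", "r");
	if (!trace) {
		perror("trace_pipe");
		return 1;
	}

	char line[512];
	while (fgets(line, sizeof(line), trace))
		fputs(line, stdout);

	fclose(trace);
	return 0;
}

The tracefs mount point can differ per target, so the paths above are an
assumption rather than a fixed location.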

The helper function __trace_sched_path() can be used to get the length
parameter of the dynamic array (path == NULL) and to copy the path into
it (path != NULL).
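
For illustration, the two call sites in the event definition below follow
this two-pass pattern (condensed excerpt from the hunk further down, with
comments added here; not part of the committed code):

	/* pass 1: path == NULL, only report the buffer length to reserve */
	__dynamic_array(char, path, __trace_sched_path(cfs_rq, NULL, 0))

	/* pass 2: copy the cgroup (or autogroup) path into the reserved space */
	__trace_sched_path(cfs_rq, __get_dynamic_array(path),
			   __get_dynamic_array_len(path));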

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
[ Fixed issues related to the new pelt.c file ]
Signed-off-by: Quentin Perret <quentin.perret@arm.com>
Change-Id: I1107044c52b74ecb3df69f3a45c1e530f0e59b1b
parent 31d01ca5
+66 −0
@@ -572,6 +572,72 @@ TRACE_EVENT(sched_wake_idle_without_ipi,

	TP_printk("cpu=%d", __entry->cpu)
);

#ifdef CONFIG_SMP
#ifdef CREATE_TRACE_POINTS
static inline
int __trace_sched_cpu(struct cfs_rq *cfs_rq)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq = cfs_rq->rq;
#else
	struct rq *rq = container_of(cfs_rq, struct rq, cfs);
#endif
	return cpu_of(rq);
}

static inline
int __trace_sched_path(struct cfs_rq *cfs_rq, char *path, int len)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	int l = path ? len : 0;

	if (task_group_is_autogroup(cfs_rq->tg))
		return autogroup_path(cfs_rq->tg, path, l) + 1;
	else
		return cgroup_path(cfs_rq->tg->css.cgroup, path, l) + 1;
#else
	if (path)
		strcpy(path, "(null)");

	return strlen("(null)");
#endif
}

#endif /* CREATE_TRACE_POINTS */

/*
 * Tracepoint for cfs_rq load tracking:
 */
TRACE_EVENT(sched_load_cfs_rq,

	TP_PROTO(struct cfs_rq *cfs_rq),

	TP_ARGS(cfs_rq),

	TP_STRUCT__entry(
		__field(	int,		cpu			)
		__dynamic_array(char,		path,
				__trace_sched_path(cfs_rq, NULL, 0)	)
		__field(	unsigned long,	load			)
		__field(	unsigned long,	rbl_load		)
		__field(	unsigned long,	util			)
	),

	TP_fast_assign(
		__entry->cpu		= __trace_sched_cpu(cfs_rq);
		__trace_sched_path(cfs_rq, __get_dynamic_array(path),
				   __get_dynamic_array_len(path));
		__entry->load		= cfs_rq->avg.load_avg;
		__entry->rbl_load 	= cfs_rq->avg.runnable_load_avg;
		__entry->util		= cfs_rq->avg.util_avg;
	),

	TP_printk("cpu=%d path=%s load=%lu rbl_load=%lu util=%lu",
		  __entry->cpu, __get_str(path), __entry->load,
		  __entry->rbl_load,__entry->util)
);
#endif /* CONFIG_SMP */
#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
+6 −0
@@ -3338,6 +3338,8 @@ static inline int propagate_entity_load_avg(struct sched_entity *se)
	update_tg_cfs_util(cfs_rq, se, gcfs_rq);
	update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);

	trace_sched_load_cfs_rq(cfs_rq);

	return 1;
}

@@ -3490,6 +3492,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
	add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);

	cfs_rq_util_change(cfs_rq, flags);

	trace_sched_load_cfs_rq(cfs_rq);
}

/**
@@ -3509,6 +3513,8 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);

	cfs_rq_util_change(cfs_rq, 0);

	trace_sched_load_cfs_rq(cfs_rq);
}

/*
+5 −0
@@ -29,6 +29,8 @@
#include "sched-pelt.h"
#include "pelt.h"

#include <trace/events/sched.h>

/*
 * Approximate:
 *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
@@ -304,6 +306,9 @@ int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
				cfs_rq->curr != NULL)) {

		___update_load_avg(&cfs_rq->avg, 1, 1);

		trace_sched_load_cfs_rq(cfs_rq);

		return 1;
	}