Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c6b5acaa authored by Satya Durga Srinivasu Prabhala's avatar Satya Durga Srinivasu Prabhala
Browse files

sched/trace: Add sched_task_util trace point



Add sched_task_util trace point for debugging issues
related to why the scheduler chose a specific CPU for
placing a task.

Change-Id: I48c91651f7cbd3eaf0640f38380bb51aa693b16f
Signed-off-by: default avatarSatya Durga Srinivasu Prabhala <satyap@codeaurora.org>
parent 84f2004d
Loading
Loading
Loading
Loading
+48 −0
Original line number Original line Diff line number Diff line
@@ -1231,6 +1231,54 @@ TRACE_EVENT(sched_energy_diff,
		__entry->backup_cpu, __entry->backup_energy)
		__entry->backup_cpu, __entry->backup_energy)
);
);


/*
 * Tracepoint for task placement decisions: records the candidate,
 * backup and finally chosen CPUs, the placement hints in effect
 * (sync, need_idle, placement_boost), which fastpath (if any) short
 * circuited the energy evaluation, and the wall time spent in the
 * placement path (sched_clock() delta from start_t).
 */
TRACE_EVENT(sched_task_util,

	TP_PROTO(struct task_struct *p, int next_cpu, int backup_cpu,
		int target_cpu, bool sync, bool need_idle, int fastpath,
		bool placement_boost, int rtg_cpu, u64 start_t),

	TP_ARGS(p, next_cpu, backup_cpu, target_cpu, sync, need_idle, fastpath,
		placement_boost, rtg_cpu, start_t),

	TP_STRUCT__entry(
		__field(int, pid			)
		__array(char, comm, TASK_COMM_LEN	)
		__field(unsigned long, util		)
		__field(int, prev_cpu			)
		__field(int, next_cpu			)
		__field(int, backup_cpu			)
		__field(int, target_cpu			)
		__field(bool, sync			)
		__field(bool, need_idle			)
		__field(int, fastpath			)
		__field(bool, placement_boost		)
		__field(int, rtg_cpu			)
		__field(u64, latency			)
	),

	TP_fast_assign(
		__entry->pid			= p->pid;
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->util			= task_util(p);
		/* prev_cpu is the CPU the task last ran on */
		__entry->prev_cpu		= task_cpu(p);
		__entry->next_cpu		= next_cpu;
		__entry->backup_cpu		= backup_cpu;
		__entry->target_cpu		= target_cpu;
		__entry->sync			= sync;
		__entry->need_idle		= need_idle;
		__entry->fastpath		= fastpath;
		__entry->placement_boost	= placement_boost;
		__entry->rtg_cpu		= rtg_cpu;
		/* time spent selecting a CPU, in sched_clock() units (ns) */
		__entry->latency		= (sched_clock() - start_t);
	),

	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d next_cpu=%d backup_cpu=%d target_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d rtg_cpu=%d latency=%llu",
		__entry->pid, __entry->comm, __entry->util, __entry->prev_cpu,
		__entry->next_cpu, __entry->backup_cpu, __entry->target_cpu,
		__entry->sync, __entry->need_idle, __entry->fastpath,
		__entry->placement_boost, __entry->rtg_cpu, __entry->latency)
);

/*
/*
 * Tracepoint for sched_get_nr_running_avg
 * Tracepoint for sched_get_nr_running_avg
 */
 */
+31 −8
Original line number Original line Diff line number Diff line
@@ -7605,6 +7605,12 @@ static inline struct cpumask *find_rtg_target(struct task_struct *p)
}
}
#endif
#endif


/*
 * Fastpaths that short-circuit the full energy-aware CPU selection;
 * reported through the sched_task_util tracepoint's fastpath field.
 */
enum fastpaths {
	NONE = 0,		/* full energy evaluation was performed */
	SYNC_WAKEUP = 1,	/* biased to the waker's CPU on a sync wakeup */
	PREV_CPU_BIAS = 2,	/* kept the task on its previous CPU */
};

/*
/*
 * Needs to be called inside rcu_read_lock critical section.
 * Needs to be called inside rcu_read_lock critical section.
 * sd is a pointer to the sched domain we wish to use for an
 * sd is a pointer to the sched domain we wish to use for an
@@ -7617,24 +7623,31 @@ static int find_energy_efficient_cpu(struct sched_domain *sd,
{
{
	int use_fbt = sched_feat(FIND_BEST_TARGET);
	int use_fbt = sched_feat(FIND_BEST_TARGET);
	int cpu_iter, eas_cpu_idx = EAS_CPU_NXT;
	int cpu_iter, eas_cpu_idx = EAS_CPU_NXT;
	int energy_cpu = -1, delta = 0;
	int energy_cpu = prev_cpu, delta = 0;
	struct energy_env *eenv;
	struct energy_env *eenv;
	struct cpumask *rtg_target = find_rtg_target(p);
	struct cpumask *rtg_target = find_rtg_target(p);
	struct find_best_target_env fbt_env;
	struct find_best_target_env fbt_env;
	bool need_idle = wake_to_idle(p);
	bool need_idle = wake_to_idle(p);
	u64 start_t = 0;
	int fastpath = 0;

	if (trace_sched_task_util_enabled())
		start_t = sched_clock();


	if (need_idle)
	if (need_idle)
		sync = 0;
		sync = 0;


	if (sysctl_sched_sync_hint_enable && sync &&
	if (sysctl_sched_sync_hint_enable && sync &&
				bias_to_waker_cpu(p, cpu, rtg_target)) {
				bias_to_waker_cpu(p, cpu, rtg_target)) {
		return cpu;
		energy_cpu = cpu;
		fastpath = SYNC_WAKEUP;
		goto out;
	}
	}


	/* prepopulate energy diff environment */
	/* prepopulate energy diff environment */
	eenv = get_eenv(p, prev_cpu);
	eenv = get_eenv(p, prev_cpu);
	if (eenv->max_cpu_count < 2)
	if (eenv->max_cpu_count < 2)
		return energy_cpu;
		goto out;


	if(!use_fbt) {
	if(!use_fbt) {
		/*
		/*
@@ -7677,8 +7690,10 @@ static int find_energy_efficient_cpu(struct sched_domain *sd,
		prefer_idle = sched_feat(EAS_PREFER_IDLE) ?
		prefer_idle = sched_feat(EAS_PREFER_IDLE) ?
				(schedtune_prefer_idle(p) > 0) : 0;
				(schedtune_prefer_idle(p) > 0) : 0;


		if (bias_to_prev_cpu(p, rtg_target))
		if (bias_to_prev_cpu(p, rtg_target)) {
			return prev_cpu;
			fastpath = PREV_CPU_BIAS;
			goto out;
		}


		eenv->max_cpu_count = EAS_CPU_BKP + 1;
		eenv->max_cpu_count = EAS_CPU_BKP + 1;


@@ -7706,7 +7721,7 @@ static int find_energy_efficient_cpu(struct sched_domain *sd,
		 * candidates beyond prev_cpu, so we will
		 * candidates beyond prev_cpu, so we will
		 * fall-back to the regular slow-path.
		 * fall-back to the regular slow-path.
		 */
		 */
		return energy_cpu;
		goto out;
	}
	}


#ifdef CONFIG_SCHED_WALT
#ifdef CONFIG_SCHED_WALT
@@ -7717,13 +7732,21 @@ static int find_energy_efficient_cpu(struct sched_domain *sd,
	if (use_fbt && (fbt_env.placement_boost || fbt_env.need_idle ||
	if (use_fbt && (fbt_env.placement_boost || fbt_env.need_idle ||
		(rtg_target && !cpumask_test_cpu(prev_cpu, rtg_target)) ||
		(rtg_target && !cpumask_test_cpu(prev_cpu, rtg_target)) ||
		 __cpu_overutilized(prev_cpu, delta) ||
		 __cpu_overutilized(prev_cpu, delta) ||
		 !task_fits_max(p, prev_cpu) || cpu_isolated(prev_cpu)))
		 !task_fits_max(p, prev_cpu) || cpu_isolated(prev_cpu))) {
		return eenv->cpu[EAS_CPU_NXT].cpu_id;
		energy_cpu = eenv->cpu[EAS_CPU_NXT].cpu_id;
		goto out;
	}


	/* find most energy-efficient CPU */
	/* find most energy-efficient CPU */
	energy_cpu = select_energy_cpu_idx(eenv) < 0 ? -1 :
	energy_cpu = select_energy_cpu_idx(eenv) < 0 ? -1 :
					eenv->cpu[eenv->next_idx].cpu_id;
					eenv->cpu[eenv->next_idx].cpu_id;


out:
	trace_sched_task_util(p, eenv->cpu[EAS_CPU_NXT].cpu_id,
			eenv->cpu[EAS_CPU_BKP].cpu_id, energy_cpu, sync,
			fbt_env.need_idle, fastpath, fbt_env.placement_boost,
			rtg_target ? cpumask_first(rtg_target) : -1,
			start_t);
	return energy_cpu;
	return energy_cpu;
}
}