Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 28c715c9 authored by qctecmdr Service's avatar qctecmdr Service Committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched/trace: Add sched_task_util trace point"

parents 8de9d3a1 c6b5acaa
Loading
Loading
Loading
Loading
+48 −0
Original line number Diff line number Diff line
@@ -1261,6 +1261,54 @@ TRACE_EVENT(sched_energy_diff,
		__entry->backup_cpu, __entry->backup_energy)
);

/*
 * Tracepoint for EAS task-placement decisions: records the candidate
 * CPUs considered for @p (next/backup/target), the flags that steered
 * the decision (sync, need_idle, placement_boost, fastpath), the
 * related-thread-group CPU, and how long the selection took.
 *
 * @start_t is a sched_clock() timestamp captured when selection began
 * (0 when the tracepoint is disabled); latency is computed from it at
 * emit time.
 */
TRACE_EVENT(sched_task_util,

	TP_PROTO(struct task_struct *p, int next_cpu, int backup_cpu,
		int target_cpu, bool sync, bool need_idle, int fastpath,
		bool placement_boost, int rtg_cpu, u64 start_t),

	TP_ARGS(p, next_cpu, backup_cpu, target_cpu, sync, need_idle, fastpath,
		placement_boost, rtg_cpu, start_t),

	TP_STRUCT__entry(
		__field(int, pid			)
		__array(char, comm, TASK_COMM_LEN	)
		__field(unsigned long, util		)
		__field(int, prev_cpu			)
		__field(int, next_cpu			)
		__field(int, backup_cpu			)
		__field(int, target_cpu			)
		__field(bool, sync			)
		__field(bool, need_idle			)
		__field(int, fastpath			)
		__field(bool, placement_boost		)
		__field(int, rtg_cpu			)
		__field(u64, latency			)
	),

	TP_fast_assign(
		__entry->pid			= p->pid;
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->util			= task_util(p);
		/* prev_cpu is derived here rather than passed in */
		__entry->prev_cpu		= task_cpu(p);
		__entry->next_cpu		= next_cpu;
		__entry->backup_cpu		= backup_cpu;
		__entry->target_cpu		= target_cpu;
		__entry->sync			= sync;
		__entry->need_idle		= need_idle;
		__entry->fastpath		= fastpath;
		__entry->placement_boost	= placement_boost;
		__entry->rtg_cpu		= rtg_cpu;
		/* selection latency; start_t == 0 when tracing was off at entry */
		__entry->latency		= (sched_clock() - start_t);
	),

	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d next_cpu=%d backup_cpu=%d target_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d rtg_cpu=%d latency=%llu",
		__entry->pid, __entry->comm, __entry->util, __entry->prev_cpu,
		__entry->next_cpu, __entry->backup_cpu, __entry->target_cpu,
		__entry->sync, __entry->need_idle, __entry->fastpath,
		__entry->placement_boost, __entry->rtg_cpu, __entry->latency)
);

/*
 * Tracepoint for sched_get_nr_running_avg
 */
+31 −8
Original line number Diff line number Diff line
@@ -7613,6 +7613,12 @@ static inline struct cpumask *find_rtg_target(struct task_struct *p)
}
#endif

/*
 * Shortcut codes reported in the sched_task_util tracepoint's
 * fastpath field; nonzero means the full energy search was skipped.
 */
enum fastpaths {
	NONE		= 0,	/* full energy-aware selection ran */
	SYNC_WAKEUP	= 1,	/* biased to the waker's CPU */
	PREV_CPU_BIAS	= 2,	/* biased to the task's previous CPU */
};

/*
 * Needs to be called inside rcu_read_lock critical section.
 * sd is a pointer to the sched domain we wish to use for an
@@ -7625,24 +7631,31 @@ static int find_energy_efficient_cpu(struct sched_domain *sd,
{
	int use_fbt = sched_feat(FIND_BEST_TARGET);
	int cpu_iter, eas_cpu_idx = EAS_CPU_NXT;
	int energy_cpu = -1, delta = 0;
	int energy_cpu = prev_cpu, delta = 0;
	struct energy_env *eenv;
	struct cpumask *rtg_target = find_rtg_target(p);
	struct find_best_target_env fbt_env;
	bool need_idle = wake_to_idle(p);
	u64 start_t = 0;
	int fastpath = 0;

	if (trace_sched_task_util_enabled())
		start_t = sched_clock();

	if (need_idle)
		sync = 0;

	if (sysctl_sched_sync_hint_enable && sync &&
				bias_to_waker_cpu(p, cpu, rtg_target)) {
		return cpu;
		energy_cpu = cpu;
		fastpath = SYNC_WAKEUP;
		goto out;
	}

	/* prepopulate energy diff environment */
	eenv = get_eenv(p, prev_cpu);
	if (eenv->max_cpu_count < 2)
		return energy_cpu;
		goto out;

	if(!use_fbt) {
		/*
@@ -7685,8 +7698,10 @@ static int find_energy_efficient_cpu(struct sched_domain *sd,
		prefer_idle = sched_feat(EAS_PREFER_IDLE) ?
				(schedtune_prefer_idle(p) > 0) : 0;

		if (bias_to_prev_cpu(p, rtg_target))
			return prev_cpu;
		if (bias_to_prev_cpu(p, rtg_target)) {
			fastpath = PREV_CPU_BIAS;
			goto out;
		}

		eenv->max_cpu_count = EAS_CPU_BKP + 1;

@@ -7714,7 +7729,7 @@ static int find_energy_efficient_cpu(struct sched_domain *sd,
		 * candidates beyond prev_cpu, so we will
		 * fall-back to the regular slow-path.
		 */
		return energy_cpu;
		goto out;
	}

#ifdef CONFIG_SCHED_WALT
@@ -7725,13 +7740,21 @@ static int find_energy_efficient_cpu(struct sched_domain *sd,
	if (use_fbt && (fbt_env.placement_boost || fbt_env.need_idle ||
		(rtg_target && !cpumask_test_cpu(prev_cpu, rtg_target)) ||
		 __cpu_overutilized(prev_cpu, delta) ||
		 !task_fits_max(p, prev_cpu) || cpu_isolated(prev_cpu)))
		return eenv->cpu[EAS_CPU_NXT].cpu_id;
		 !task_fits_max(p, prev_cpu) || cpu_isolated(prev_cpu))) {
		energy_cpu = eenv->cpu[EAS_CPU_NXT].cpu_id;
		goto out;
	}

	/* find most energy-efficient CPU */
	energy_cpu = select_energy_cpu_idx(eenv) < 0 ? -1 :
					eenv->cpu[eenv->next_idx].cpu_id;

out:
	trace_sched_task_util(p, eenv->cpu[EAS_CPU_NXT].cpu_id,
			eenv->cpu[EAS_CPU_BKP].cpu_id, energy_cpu, sync,
			fbt_env.need_idle, fastpath, fbt_env.placement_boost,
			rtg_target ? cpumask_first(rtg_target) : -1,
			start_t);
	return energy_cpu;
}