Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7dcadadd authored by qctecmdr Service, committed via Gerrit (the friendly Code Review server)
Browse files

Merge "sched: remove skip_sg"

parents 25541e73 aba51e0e
Loading
Loading
Loading
Loading
+23 −6
Original line number Diff line number Diff line
@@ -1157,10 +1157,12 @@ TRACE_EVENT(sched_find_best_target,

	TP_PROTO(struct task_struct *tsk, bool prefer_idle,
		unsigned long min_util, int start_cpu,
		int best_idle, int best_active, int target),
		int best_idle, int best_active, int most_spare_cap, int target,
		int backup_cpu),

	TP_ARGS(tsk, prefer_idle, min_util, start_cpu,
		best_idle, best_active, target),
		best_idle, best_active, most_spare_cap, target,
		backup_cpu),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
@@ -1170,7 +1172,9 @@ TRACE_EVENT(sched_find_best_target,
		__field( int,	start_cpu		)
		__field( int,	best_idle		)
		__field( int,	best_active		)
		__field( int,	most_spare_cap		)
		__field( int,	target			)
		__field( int,	backup_cpu)
	),

	TP_fast_assign(
@@ -1181,15 +1185,19 @@ TRACE_EVENT(sched_find_best_target,
		__entry->start_cpu 	= start_cpu;
		__entry->best_idle	= best_idle;
		__entry->best_active	= best_active;
		__entry->most_spare_cap	= most_spare_cap;
		__entry->target		= target;
		__entry->backup_cpu	= backup_cpu;
	),

	TP_printk("pid=%d comm=%s prefer_idle=%d start_cpu=%d "
		  "best_idle=%d best_active=%d target=%d",
		  "best_idle=%d best_active=%d most_spare_cap=%d target=%d backup=%d",
		__entry->pid, __entry->comm,
		__entry->prefer_idle, __entry->start_cpu,
		__entry->best_idle, __entry->best_active,
		__entry->target)
		__entry->most_spare_cap,
		__entry->target,
		__entry->backup_cpu)
);

TRACE_EVENT(sched_cpu_util,
@@ -1208,6 +1216,10 @@ TRACE_EVENT(sched_cpu_util,
		__field(unsigned int, capacity_orig		)
		__field(int, idle_state				)
		__field(u64, irqload				)
		__field(int, online				)
		__field(int, isolated				)
		__field(int, reserved				)
		__field(int, high_irq_load			)
	),

	TP_fast_assign(
@@ -1220,10 +1232,15 @@ TRACE_EVENT(sched_cpu_util,
		__entry->capacity_orig		= capacity_orig_of(cpu);
		__entry->idle_state		= idle_get_state_idx(cpu_rq(cpu));
		__entry->irqload		= sched_irqload(cpu);
		__entry->online			= cpu_online(cpu);
		__entry->isolated		= cpu_isolated(cpu);
		__entry->reserved		= is_reserved(cpu);
		__entry->high_irq_load		= sched_cpu_high_irqload(cpu);
	),

	TP_printk("cpu=%d nr_running=%d cpu_util=%ld cpu_util_cum=%ld capacity_curr=%u capacity=%u capacity_orig=%u idle_state=%d irqload=%llu",
		__entry->cpu, __entry->nr_running, __entry->cpu_util, __entry->cpu_util_cum, __entry->capacity_curr, __entry->capacity, __entry->capacity_orig, __entry->idle_state, __entry->irqload)
	TP_printk("cpu=%d nr_running=%d cpu_util=%ld cpu_util_cum=%ld capacity_curr=%u capacity=%u capacity_orig=%u idle_state=%d irqload=%llu online=%u, isolated=%u, reserved=%u, high_irq_load=%u",
		__entry->cpu, __entry->nr_running, __entry->cpu_util, __entry->cpu_util_cum, __entry->capacity_curr, __entry->capacity, __entry->capacity_orig, __entry->idle_state, __entry->irqload,
		__entry->online, __entry->isolated, __entry->reserved, __entry->high_irq_load)
);

TRACE_EVENT(sched_energy_diff,
+14 −32
Original line number Diff line number Diff line
@@ -7044,31 +7044,6 @@ static bool is_packing_eligible(struct task_struct *p, int target_cpu,
	return (estimated_capacity <= capacity_curr_of(target_cpu));
}

/*
 * skip_sg - decide whether find_best_target() may bypass an entire
 * sched group without scanning its CPUs.
 *
 * Returns true when the group is not worth iterating: either every CPU
 * in it is isolated, or a suitable target has already been found in a
 * previously visited group.  Returns false when the group must be
 * scanned — in particular when the task's affinity mask confines it to
 * this group, in which case skipping would leave the task nowhere to go.
 */
static inline bool skip_sg(struct task_struct *p, struct sched_group *sg,
			   struct cpumask *rtg_target,
			   unsigned long target_capacity)
{
	/* A group whose CPUs are all isolated has zero weight: nothing to scan. */
	if (sg->group_weight == 0)
		return true;

	/* Never skip the only group the task's affinity allows it to run in. */
	if (cpumask_subset(&p->cpus_allowed, sched_group_span(sg)))
		return false;

	/*
	 * target_capacity drops from its ULONG_MAX sentinel once a target
	 * CPU has been found in an earlier group; after that, remaining
	 * groups need not be examined.
	 */
	return target_capacity != ULONG_MAX;
}

static int start_cpu(struct task_struct *p, bool boosted,
		     struct cpumask *rtg_target)
{
@@ -7142,14 +7117,13 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
	/* Scan CPUs in all SDs */
	sg = sd->groups;
	do {
		if (skip_sg(p, sg, fbt_env->rtg_target, target_capacity))
			continue;

		for_each_cpu_and(i, &p->cpus_allowed, sched_group_span(sg)) {
			unsigned long capacity_curr = capacity_curr_of(i);
			unsigned long capacity_orig = capacity_orig_of(i);
			unsigned long wake_util, new_util, new_util_cuml;

			trace_sched_cpu_util(i);

			if (!cpu_online(i) || cpu_isolated(i))
				continue;

@@ -7164,8 +7138,6 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
			if (sched_cpu_high_irqload(i))
				continue;

			trace_sched_cpu_util(i);

			/*
			 * p's blocked utilization is still accounted for on prev_cpu
			 * so prev_cpu will receive a negative bias due to the double
@@ -7238,7 +7210,8 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
					trace_sched_find_best_target(p,
							prefer_idle, min_util,
							cpu, best_idle_cpu,
							best_active_cpu, i);
							best_active_cpu,
							-1, i, -1);

					return i;
				}
@@ -7382,6 +7355,13 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
			!is_max_capacity_cpu(group_first_cpu(sg)))
			target_capacity = ULONG_MAX;

		/*
		 * if we have found a target cpu within a group, don't bother
		 * checking other groups
		 */
		if (target_capacity != ULONG_MAX)
			break;

	} while (sg = sg->next, sg != sd->groups);

	if (best_idle_cpu != -1 && !is_packing_eligible(p, target_cpu, fbt_env,
@@ -7426,7 +7406,9 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,

	trace_sched_find_best_target(p, prefer_idle, min_util, cpu,
				     best_idle_cpu, best_active_cpu,
				     target_cpu);
				     most_spare_cap_cpu,
				     target_cpu,
				     *backup_cpu);

	/* it is possible for target and backup
	 * to select same CPU - if so, drop backup