
Commit 0b6cfb36 authored by Joonwoo Park

sched/rt: make RT task upmigration possible



The current RT task placement algorithm tries lower-capacity CPUs first and
then higher-capacity CPUs.  However, when checking whether the task fits on
a CPU, the algorithm assumes that any CPU which isn't overutilized can
accommodate the waking task.  This is wrong because it doesn't account for
the waking task's utilization or the capacity margin.  Consequently, RT
tasks are never placed on big-cluster CPUs, even when their utilization is
high enough to warrant it.

Check whether a CPU is overutilized by taking the waking task's utilization
as well as the capacity margin into account.

Also refactor __cpu_overutilized() to take the CPU's utilization as an
argument so that the RT task placement logic can avoid unnecessary
cpu_util() calls.
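
For illustration, the following is a minimal user-space sketch of the margin
check that __cpu_overutilized() performs after this change.  It is not
kernel code: the helper name would_be_overutilized() is hypothetical, and
the capacity, utilization, and margin numbers are made-up example values.

/*
 * Sketch of the overutilization check after this patch: a CPU is
 * considered overutilized when capacity_orig * 1024 < util * margin,
 * and the waking task's utilization is now added to the CPU's
 * utilization before the comparison.  Example values only.
 */
#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024UL

static bool would_be_overutilized(unsigned long capacity_orig,
				  unsigned long util,
				  unsigned long capacity_margin)
{
	/* Same inequality as the reworked __cpu_overutilized(). */
	return capacity_orig * SCHED_CAPACITY_SCALE < util * capacity_margin;
}

int main(void)
{
	unsigned long capacity = 512;	/* little CPU, example value */
	unsigned long cpu_util = 300;	/* CPU's current utilization */
	unsigned long rt_util  = 200;	/* waking RT task's utilization */
	unsigned long margin   = 1280;	/* ~1.25x capacity margin */

	/* Old check: only the CPU's own utilization -> not overutilized. */
	printf("without waking task: %d\n",
	       would_be_overutilized(capacity, cpu_util, margin));

	/* New check: the waking task's utilization is added first ->
	 * overutilized, so placement moves on to a higher-capacity CPU. */
	printf("with waking task:    %d\n",
	       would_be_overutilized(capacity, cpu_util + rt_util, margin));
	return 0;
}

With these example numbers the CPU passes the old check (300 * 1280 <
512 * 1024) but fails the new one (500 * 1280 > 512 * 1024), which is why
the RT task can now be upmigrated to a big-cluster CPU.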

Change-Id: Idec8ab3d894135f436af531678fe52ffddf38e0b
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
parent 2abf571b
+7 −5
@@ -6161,15 +6161,15 @@ static inline bool task_fits_spare(struct task_struct *p, int cpu)
 	return __task_fits(p, cpu, cpu_util(cpu));
 }
 
-static bool __cpu_overutilized(int cpu, int delta)
+bool __cpu_overutilized(int cpu, unsigned long util)
 {
-	return (capacity_orig_of(cpu) * 1024) <
-	       ((cpu_util(cpu) + delta) * sysctl_sched_capacity_margin);
+	return (capacity_orig_of(cpu) * 1024 <
+		util * sysctl_sched_capacity_margin);
 }
 
 bool cpu_overutilized(int cpu)
 {
-	return __cpu_overutilized(cpu, 0);
+	return __cpu_overutilized(cpu, cpu_util(cpu));
 }
 
 #ifdef CONFIG_SCHED_TUNE
@@ -7188,7 +7188,9 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
 		task_util_boosted = 0;
 #endif
 		/* Not enough spare capacity on previous cpu */
-		if (__cpu_overutilized(task_cpu(p), task_util_boosted)) {
+		if (__cpu_overutilized(task_cpu(p),
+				       cpu_util(task_cpu(p)) +
+						task_util_boosted)) {
 			trace_sched_task_util_overutilzed(p, task_cpu(p),
 						task_util(p), target_cpu,
 						target_cpu, 0, need_idle);
+2 −1
@@ -1724,6 +1724,7 @@ static int find_lowest_rq(struct task_struct *task)
 	unsigned long cpu_capacity;
 	unsigned long best_capacity;
 	unsigned long util, best_cpu_util = ULONG_MAX;
+	unsigned long tutil = task_util(task);
 	bool placement_boost;
 
 	/* Make sure the mask is initialized first */
@@ -1786,7 +1787,7 @@ static int find_lowest_rq(struct task_struct *task)
 			 * double count rt task load.
 			 */
 			util = cpu_util(cpu);
-			if (!cpu_overutilized(cpu)) {
+			if (!__cpu_overutilized(cpu, util + tutil)) {
 				if (cpu_isolated(cpu))
 					continue;
 
+1 −0
@@ -1457,6 +1457,7 @@ extern void trigger_load_balance(struct rq *rq);

 extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
 
+bool __cpu_overutilized(int cpu, unsigned long util);
 bool cpu_overutilized(int cpu);
 
 #endif