
Commit f1d88b44 authored by Viresh Kumar, committed by Ingo Molnar

sched/fair: Rearrange select_task_rq_fair() to optimize it



Rearrange select_task_rq_fair() a bit to avoid executing some
conditional statements in a few specific code paths. That gets rid
of the goto as well.

This shouldn't result in any functional changes.
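
In outline, the wake-affine decision now happens inside the domain walk,
right where the matching domain is found, so the post-loop affine_sd test
and the pick_cpu label are no longer needed. The stand-alone C program
below is a minimal sketch of just that restructuring pattern; it is
illustrative only, and select_cpu, pick_fast, pick_slow and struct domain
are hypothetical stand-ins for select_task_rq_fair(), select_idle_sibling(),
find_idlest_cpu() and struct sched_domain, not kernel APIs:

/*
 * Toy model of the control-flow rearrangement in this patch.
 * All names are illustrative stand-ins, not kernel code.
 */
#include <stdio.h>
#include <stdbool.h>

struct domain { bool wake_affine; bool balance; };

static int pick_fast(int prev) { return prev; } /* stands in for select_idle_sibling() */
static int pick_slow(int cpu)  { return cpu;  } /* stands in for find_idlest_cpu() */

static int select_cpu(const struct domain *doms, int ndoms,
		      int cpu, int prev_cpu, bool want_affine, bool wake)
{
	const struct domain *sd = NULL;
	int new_cpu = prev_cpu;

	for (int i = 0; i < ndoms; i++) {
		if (want_affine && doms[i].wake_affine) {
			/*
			 * After the patch: the affine result is consumed
			 * right where the domain is found, instead of being
			 * stashed in affine_sd and re-tested after the loop.
			 */
			if (cpu != prev_cpu)
				new_cpu = cpu; /* stands in for wake_affine() */
			sd = NULL; /* prefer wake_affine over balance flags */
			break;
		}
		if (doms[i].balance)
			sd = &doms[i];
	}

	/*
	 * A single if/else-if chain replaces the old affine_sd test
	 * plus the pick_cpu label, so no goto is needed.
	 */
	if (sd)			/* slow path */
		new_cpu = pick_slow(cpu);
	else if (wake)		/* fast path */
		new_cpu = pick_fast(new_cpu);

	return new_cpu;
}

int main(void)
{
	const struct domain doms[] = { { .wake_affine = true, .balance = false } };

	printf("picked CPU %d\n", select_cpu(doms, 1, 2, 0, true, true));
	return 0;
}

The payoff of this shape is that the fast path evaluates no slow-path
conditionals and vice versa, which is what the real patch achieves in
select_task_rq_fair().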

Tested-by: Rohit Jain <rohit.k.jain@oracle.com>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: http://lkml.kernel.org/r/20831b8d237bf3a20e4e328286f678b425ff04c9.1524738578.git.viresh.kumar@linaro.org


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent b5bf9a90
kernel/sched/fair.c +16 −21
@@ -6613,7 +6613,7 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
 static int
 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
 {
-	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
+	struct sched_domain *tmp, *sd = NULL;
 	int cpu = smp_processor_id();
 	int new_cpu = prev_cpu;
 	int want_affine = 0;
@@ -6636,7 +6636,10 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
 		 */
 		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
 		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
-			affine_sd = tmp;
+			if (cpu != prev_cpu)
+				new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);
+
+			sd = NULL; /* Prefer wake_affine over balance flags */
 			break;
 		}
 
@@ -6646,34 +6649,26 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
 			break;
 	}
 
-	if (affine_sd) {
-		sd = NULL; /* Prefer wake_affine over balance flags */
-		if (cpu == prev_cpu)
-			goto pick_cpu;
-
-		new_cpu = wake_affine(affine_sd, p, cpu, prev_cpu, sync);
-	}
+	if (unlikely(sd)) {
+		/* Slow path */
 
-	if (sd && !(sd_flag & SD_BALANCE_FORK)) {
 		/*
 		 * We're going to need the task's util for capacity_spare_wake
 		 * in find_idlest_group. Sync it up to prev_cpu's
 		 * last_update_time.
 		 */
-		sync_entity_load_avg(&p->se);
-	}
+		if (!(sd_flag & SD_BALANCE_FORK))
+			sync_entity_load_avg(&p->se);
 
-	if (!sd) {
-pick_cpu:
-		if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */
-			new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
+		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
+	} else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */
+		/* Fast path */
 
-			if (want_affine)
-				current->recent_used_cpu = cpu;
-		}
-	} else {
-		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
-	}
+		new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
+
+		if (want_affine)
+			current->recent_used_cpu = cpu;
+	}
 	rcu_read_unlock();
 
 	return new_cpu;