
Commit 4dcfe102 authored by Peter Zijlstra, committed by Ingo Molnar

sched: Avoid SMT siblings in select_idle_sibling() if possible



Prevent select_idle_sibling() from picking an SMT sibling thread when there
is an idle core that shares the cache.

This fixes SMT balancing in the increasingly common case where an idle
cache-sharing core is available to balance to.

Tested-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Link: http://lkml.kernel.org/r/1321350377.1421.55.camel@twins
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent f1c6f1a7
+28 −14
@@ -2326,7 +2326,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
 	int cpu = smp_processor_id();
 	int prev_cpu = task_cpu(p);
 	struct sched_domain *sd;
-	int i;
+	struct sched_group *sg;
+	int i, smt = 0;
 
 	/*
 	 * If the task is going to be woken-up on this cpu and if it is
@@ -2346,25 +2347,38 @@ static int select_idle_sibling(struct task_struct *p, int target)
 	 * Otherwise, iterate the domains and find an elegible idle cpu.
 	 */
 	rcu_read_lock();
+again:
 	for_each_domain(target, sd) {
-		if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
-			break;
+		if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
+			continue;
 
-		for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
-			if (idle_cpu(i)) {
-				target = i;
-				break;
+		if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) {
+			if (!smt) {
+				smt = 1;
+				goto again;
 			}
+			break;
 		}
 
-		/*
-		 * Lets stop looking for an idle sibling when we reached
-		 * the domain that spans the current cpu and prev_cpu.
-		 */
-		if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
-		    cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
-			break;
+		sg = sd->groups;
+		do {
+			if (!cpumask_intersects(sched_group_cpus(sg),
+						tsk_cpus_allowed(p)))
+				goto next;
+
+			for_each_cpu(i, sched_group_cpus(sg)) {
+				if (!idle_cpu(i))
+					goto next;
+			}
+
+			target = cpumask_first_and(sched_group_cpus(sg),
+					tsk_cpus_allowed(p));
+			goto done;
+next:
+			sg = sg->next;
+		} while (sg != sd->groups);
 	}
+done:
 	rcu_read_unlock();
 
 	return target;
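
For readers following the diff, here is a minimal, self-contained user-space
sketch of the policy this commit introduces. The names (NR_CORES,
THREADS_PER_CORE, core_is_idle(), pick_target()) and the flat idle[] model are
illustrative stand-ins, not kernel APIs: pass 1 models the new group scan that
skips SMT (SD_SHARE_CPUPOWER) domains and accepts only a fully idle core, and
pass 2 models the smt = 1 retry that settles for an idle sibling thread.

#include <stdbool.h>
#include <stdio.h>

#define NR_CORES 4
#define THREADS_PER_CORE 2

/* idle[c][t] models idle_cpu() for thread t of core c. */
static bool idle[NR_CORES][THREADS_PER_CORE];

/* A core stands in for a sched_group at the SMT level: it counts as
 * idle only if every sibling thread in it is idle. */
static bool core_is_idle(int core)
{
	for (int t = 0; t < THREADS_PER_CORE; t++)
		if (!idle[core][t])
			return false;
	return true;
}

/* Pass 1: prefer a fully idle core, mirroring the group scan that skips
 * SMT domains. Pass 2: mirror the smt = 1 retry and accept any idle
 * thread, even one next to a busy sibling. Returns a flat cpu id
 * (core * THREADS_PER_CORE + thread), or -1 if nothing is idle. */
static int pick_target(void)
{
	for (int c = 0; c < NR_CORES; c++)
		if (core_is_idle(c))
			return c * THREADS_PER_CORE;

	for (int c = 0; c < NR_CORES; c++)
		for (int t = 0; t < THREADS_PER_CORE; t++)
			if (idle[c][t])
				return c * THREADS_PER_CORE + t;

	return -1;
}

int main(void)
{
	/* Core 0 has one idle thread next to a busy sibling;
	 * core 2 is entirely idle. */
	idle[0][1] = true;
	idle[2][0] = idle[2][1] = true;

	/* Prints 4 (core 2, thread 0): the fully idle core wins over
	 * core 0's idle sibling, which the old first-idle-cpu scan
	 * would have picked. */
	printf("target cpu: %d\n", pick_target());
	return 0;
}

In the real patch the second pass is entered via goto again with smt set, so
the domain walk no longer skips the SMT level; the old behaviour (take any
idle CPU, siblings included) remains the fallback when no whole core is idle.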