
Commit 38022906 authored by Peter Zijlstra, committed by Ingo Molnar

sched: Fix sched_exec() balancing



Since we access ->cpus_allowed without holding rq->lock, we need
a retry loop to validate the result. This comes nearly for free
when we merge sched_migrate_task() into sched_exec(), since the
latter already does the needed check.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
LKML-Reference: <20091216170517.884743662@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent e2912009
kernel/sched.c +23 −22
@@ -2322,7 +2322,7 @@ void task_oncpu_function_call(struct task_struct *p,
  *
  *  - fork, @p is stable because it isn't on the tasklist yet
  *
- *  - exec, @p is unstable XXX
+ *  - exec, @p is unstable, retry loop
  *
  *  - wake-up, we serialize ->cpus_allowed against TASK_WAKING so
  *             we should be good.
@@ -3132,21 +3132,36 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 }
 
 /*
- * If dest_cpu is allowed for this process, migrate the task to it.
- * This is accomplished by forcing the cpu_allowed mask to only
- * allow dest_cpu, which will force the cpu onto dest_cpu. Then
- * the cpu_allowed mask is restored.
+ * sched_exec - execve() is a valuable balancing opportunity, because at
+ * this point the task has the smallest effective memory and cache footprint.
  */
-static void sched_migrate_task(struct task_struct *p, int dest_cpu)
+void sched_exec(void)
 {
+	struct task_struct *p = current;
 	struct migration_req req;
+	int dest_cpu, this_cpu;
 	unsigned long flags;
 	struct rq *rq;
 
+again:
+	this_cpu = get_cpu();
+	dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
+	if (dest_cpu == this_cpu) {
+		put_cpu();
+		return;
+	}
+
 	rq = task_rq_lock(p, &flags);
+	put_cpu();
+
+	/*
+	 * select_task_rq() can race against ->cpus_allowed
+	 */
 	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
-	    || unlikely(!cpu_active(dest_cpu)))
-		goto out;
+	    || unlikely(!cpu_active(dest_cpu))) {
+		task_rq_unlock(rq, &flags);
+		goto again;
+	}
 
 	/* force the process onto the specified CPU */
 	if (migrate_task(p, dest_cpu, &req)) {
@@ -3161,23 +3176,9 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
 
 		return;
 	}
-out:
 	task_rq_unlock(rq, &flags);
 }
 
-/*
- * sched_exec - execve() is a valuable balancing opportunity, because at
- * this point the task has the smallest effective memory and cache footprint.
- */
-void sched_exec(void)
-{
-	int new_cpu, this_cpu = get_cpu();
-	new_cpu = select_task_rq(current, SD_BALANCE_EXEC, 0);
-	put_cpu();
-	if (new_cpu != this_cpu)
-		sched_migrate_task(current, new_cpu);
-}
-
 /*
  * pull_task - move a task from a remote runqueue to the local runqueue.
  * Both runqueues must be locked.
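
The shape of the fix, in miniature: read the shared state optimistically without the lock, pick a destination, then revalidate under the lock and jump back to the top if the answer went stale. Below is a minimal user-space sketch of that pattern, assuming a pthread mutex in place of rq->lock and a plain bitmask in place of ->cpus_allowed; pick_dest() and migrate_to_allowed_cpu() are hypothetical names used for illustration, not kernel APIs.

/*
 * retry_sketch.c - standalone illustration of the lockless-read +
 * revalidate-under-lock retry pattern used by the patch above.
 * Build: cc -pthread retry_sketch.c
 */
#include <pthread.h>
#include <stdio.h>

#define NCPUS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* plays rq->lock */
static unsigned int allowed_mask = 0x7;                  /* plays ->cpus_allowed */

/*
 * Racy read, like select_task_rq(): picks the lowest allowed CPU
 * without taking the lock, so the answer can be stale by the time
 * the caller acts on it.
 */
static int pick_dest(void)
{
	for (int cpu = 0; cpu < NCPUS; cpu++)
		if (allowed_mask & (1u << cpu))
			return cpu;
	return -1;
}

static void migrate_to_allowed_cpu(void)
{
	int dest;

again:
	dest = pick_dest();             /* optimistic: lock not held */
	if (dest < 0)
		return;                 /* nowhere to go */

	pthread_mutex_lock(&lock);
	/*
	 * Revalidate under the lock; the mask may have changed between
	 * pick_dest() and here: exactly the race the patch closes.
	 */
	if (!(allowed_mask & (1u << dest))) {
		pthread_mutex_unlock(&lock);
		goto again;             /* stale answer: redo the selection */
	}
	printf("migrating to CPU %d\n", dest);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	migrate_to_allowed_cpu();
	return 0;
}

Note the parallel with the patch: like the merged sched_exec(), the sketch redoes the whole selection on failure rather than giving up, so a mask change between the unlocked read and the locked check simply costs one more pass through the loop.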