Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bac78573 authored by Oleg Nesterov, committed by Ingo Molnar
Browse files

sched/fair: Use task_rcu_dereference()



Simplify task_numa_compare()'s task reference magic by using
task_rcu_dereference().

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Kirill Tkhai <ktkhai@parallels.com>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vladimir Davydov <vdavydov@parallels.com>
Link: http://lkml.kernel.org/r/20160518195733.GA15914@redhat.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 150593bf
Loading
Loading
Loading
Loading
+4 −30
Original line number Diff line number Diff line
@@ -1305,6 +1305,8 @@ static void task_numa_assign(struct task_numa_env *env,
{
	if (env->best_task)
		put_task_struct(env->best_task);
	if (p)
		get_task_struct(p);

	env->best_task = p;
	env->best_imp = imp;
@@ -1372,31 +1374,11 @@ static void task_numa_compare(struct task_numa_env *env,
	long imp = env->p->numa_group ? groupimp : taskimp;
	long moveimp = imp;
	int dist = env->dist;
	bool assigned = false;

	rcu_read_lock();

	raw_spin_lock_irq(&dst_rq->lock);
	cur = dst_rq->curr;
	/*
	 * No need to move the exiting task or idle task.
	 */
	if ((cur->flags & PF_EXITING) || is_idle_task(cur))
	cur = task_rcu_dereference(&dst_rq->curr);
	if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
		cur = NULL;
	else {
		/*
		 * The task_struct must be protected here to protect the
		 * p->numa_faults access in the task_weight since the
		 * numa_faults could already be freed in the following path:
		 * finish_task_switch()
		 *     --> put_task_struct()
		 *         --> __put_task_struct()
		 *             --> task_numa_free()
		 */
		get_task_struct(cur);
	}

	raw_spin_unlock_irq(&dst_rq->lock);

	/*
	 * Because we have preemption enabled we can get migrated around and
@@ -1479,7 +1461,6 @@ static void task_numa_compare(struct task_numa_env *env,
		 */
		if (!load_too_imbalanced(src_load, dst_load, env)) {
			imp = moveimp - 1;
			put_task_struct(cur);
			cur = NULL;
			goto assign;
		}
@@ -1505,16 +1486,9 @@ static void task_numa_compare(struct task_numa_env *env,
		env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);

assign:
	assigned = true;
	task_numa_assign(env, cur, imp);
unlock:
	rcu_read_unlock();
	/*
	 * The dst_rq->curr isn't assigned. The protection for task_struct is
	 * finished.
	 */
	if (cur && !assigned)
		put_task_struct(cur);
}

static void task_numa_find_cpu(struct task_numa_env *env,