
Commit 2a1c6029 authored by Xunlei Pang, committed by Thomas Gleixner

rtmutex: Deboost before waking up the top waiter



We should deboost before waking the high-priority task, such that we
don't run two tasks with the same "state" (priority, deadline,
sched_class, etc.).
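
As a rough illustration of the ordering change (a minimal userspace
sketch; the functions below are printf stand-ins for the kernel
primitives, not the real API):

/* ordering.c - illustrative stand-ins only, not kernel code */
#include <stdbool.h>
#include <stdio.h>

static void wake_up_top_waiter(void) { puts("wake top waiter"); }
static void deboost_current(void)    { puts("restore own base priority"); }

/* Old order: after the wakeup, the woken waiter and the still-boosted
 * waker are briefly runnable at the same priority. */
static void unlock_wake_then_deboost(bool deboost)
{
	wake_up_top_waiter();
	if (deboost)
		deboost_current();
}

/* New order: drop the inherited priority first, then wake. */
static void unlock_deboost_then_wake(bool deboost)
{
	if (deboost)
		deboost_current();
	wake_up_top_waiter();
}

int main(void)
{
	unlock_wake_then_deboost(true);	/* old */
	unlock_deboost_then_wake(true);	/* new */
	return 0;
}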

In order to make sure the boosting task doesn't start running between
unlock and deboost (due to a 'spurious' wakeup), we move the deboost
under the wait_lock; that way it is serialized against the wait loop in
__rt_mutex_slowlock().
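
That wait loop has roughly the following shape (a simplified,
hypothetical userspace sketch with a pthread mutex standing in for
lock->wait_lock; the real loop lives in __rt_mutex_slowlock()):

/* waitloop.c - simplified shape of a wait loop serialized by a lock */
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static bool lock_available;	/* stand-in for try_to_take_rt_mutex() */

static void slowlock_wait(void)
{
	pthread_mutex_lock(&wait_lock);
	while (!lock_available) {
		/* Drop wait_lock and sleep; a spurious wakeup lands back
		 * here and re-checks under wait_lock, so anything done
		 * while holding wait_lock (such as the deboost) is
		 * ordered against this check. */
		pthread_mutex_unlock(&wait_lock);
		sched_yield();		/* stand-in for schedule() */
		pthread_mutex_lock(&wait_lock);
	}
	lock_available = false;		/* we now own the lock */
	pthread_mutex_unlock(&wait_lock);
}

int main(void)
{
	lock_available = true;		/* uncontended for this demo */
	slowlock_wait();
	return 0;
}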

Doing the deboost early can however lead to priority inversion if
current were to get preempted after the deboost but before waking our
high-prio task; hence we disable preemption before the deboost and
re-enable it once the wakeup is done.
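
Condensed, the patched unlock path has this shape (every function here
is a stub standing in for the kernel call named in the comment; see the
diff below for the real code):

/* unlock_shape.c - condensed shape of the patched unlock path */
#include <stdio.h>

#define STUB(name) static void name(void) { puts(#name); }
STUB(lock_wait_lock)		/* raw_spin_lock_irqsave(&lock->wait_lock, flags) */
STUB(dequeue_and_deboost)	/* mark_wakeup_next_waiter(): rt_mutex_dequeue_pi() + __rt_mutex_adjust_prio() */
STUB(preempt_disable_stub)	/* preempt_disable() */
STUB(unlock_wait_lock)		/* raw_spin_unlock_irqrestore(&lock->wait_lock, flags) */
STUB(wake_and_enable)		/* rt_mutex_postunlock(): wake_up_q() + preempt_enable() */

int main(void)
{
	lock_wait_lock();
	dequeue_and_deboost();		/* deboost happens under wait_lock */
	preempt_disable_stub();		/* close the inversion window... */
	unlock_wait_lock();
	wake_and_enable();		/* ...reopened only after the wakeup */
	return 0;
}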

This gets us the right semantic order, but most importantly, it
ensures pointer stability for the next patch, where we have
rt_mutex_setprio() cache a pointer to the top-most waiter task. If we,
as before this change, did the wakeup first and then deboosted, this
pointer might point into thin air.
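
As a rough sketch of the lifetime hazard (hypothetical userspace code,
not the kernel's): once the waiter has been woken it can run and go
away, so a pointer cached before the wakeup may dangle by the time the
deboost runs.

/* stale_ptr.c - hypothetical sketch of the wake-before-deboost hazard */
#include <stdlib.h>

struct task { int prio; };

int main(void)
{
	struct task *top_waiter = malloc(sizeof(*top_waiter));

	if (!top_waiter)
		return 1;
	top_waiter->prio = 10;

	/* Old order: wakeup first. The woken task may run to completion
	 * and be freed before the deboost happens. */
	free(top_waiter);	/* stands in for the woken task exiting */

	/* A later deboost that dereferenced the cached pointer, e.g.
	 * top_waiter->prio, would be a use-after-free. Deboosting before
	 * the wakeup (this patch) keeps the pointer stable. */
	return 0;
}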

[peterz: Changelog + patch munging]
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Xunlei Pang <xlpang@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: juri.lelli@arm.com
Cc: bigeasy@linutronix.de
Cc: mathieu.desnoyers@efficios.com
Cc: jdesfossez@efficios.com
Cc: bristot@redhat.com
Link: http://lkml.kernel.org/r/20170323150216.110065320@infradead.org


Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 38bffdac
kernel/futex.c (+1 −4)
@@ -1460,10 +1460,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
 out_unlock:
 	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 
-	if (deboost) {
-		wake_up_q(&wake_q);
-		rt_mutex_adjust_prio(current);
-	}
+	rt_mutex_postunlock(&wake_q, deboost);
 
 	return ret;
 }
kernel/locking/rtmutex.c (+32 −27)
@@ -372,24 +372,6 @@ static void __rt_mutex_adjust_prio(struct task_struct *task)
 		rt_mutex_setprio(task, prio);
 }
 
-/*
- * Adjust task priority (undo boosting). Called from the exit path of
- * rt_mutex_slowunlock() and rt_mutex_slowlock().
- *
- * (Note: We do this outside of the protection of lock->wait_lock to
- * allow the lock to be taken while or before we readjust the priority
- * of task. We do not use the spin_xx_mutex() variants here as we are
- * outside of the debug path.)
- */
-void rt_mutex_adjust_prio(struct task_struct *task)
-{
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&task->pi_lock, flags);
-	__rt_mutex_adjust_prio(task);
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-}
-
 /*
  * Deadlock detection is conditional:
  *
@@ -1051,6 +1033,7 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
 	 * lock->wait_lock.
 	 */
 	rt_mutex_dequeue_pi(current, waiter);
+	__rt_mutex_adjust_prio(current);
 
 	/*
 	 * As we are waking up the top waiter, and the waiter stays
@@ -1393,6 +1376,16 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
 	 */
 	mark_wakeup_next_waiter(wake_q, lock);
 
+	/*
+	 * We should deboost before waking the top waiter task such that
+	 * we don't run two tasks with the 'same' priority. This however
+	 * can lead to prio-inversion if we would get preempted after
+	 * the deboost but before waking our high-prio task, hence the
+	 * preempt_disable before unlock. Pairs with preempt_enable() in
+	 * rt_mutex_postunlock();
+	 */
+	preempt_disable();
+
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	/* check PI boosting */
@@ -1442,6 +1435,18 @@ rt_mutex_fasttrylock(struct rt_mutex *lock,
 	return slowfn(lock);
 }
 
+/*
+ * Undo pi boosting (if necessary) and wake top waiter.
+ */
+void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost)
+{
+	wake_up_q(wake_q);
+
+	/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
+	if (deboost)
+		preempt_enable();
+}
+
 static inline void
 rt_mutex_fastunlock(struct rt_mutex *lock,
 		    bool (*slowfn)(struct rt_mutex *lock,
@@ -1455,11 +1460,7 @@ rt_mutex_fastunlock(struct rt_mutex *lock,

 	deboost = slowfn(lock, &wake_q);
 
-	wake_up_q(&wake_q);
-
-	/* Undo pi boosting if necessary: */
-	if (deboost)
-		rt_mutex_adjust_prio(current);
+	rt_mutex_postunlock(&wake_q, deboost);
 }
 
 /**
@@ -1572,6 +1573,13 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
 	}
 
 	mark_wakeup_next_waiter(wake_q, lock);
+	/*
+	 * We've already deboosted, retain preempt_disabled when dropping
+	 * the wait_lock to avoid inversion until the wakeup. Matched
+	 * by rt_mutex_postunlock();
+	 */
+	preempt_disable();
 
 	return true; /* deboost and wakeups */
 }

@@ -1584,10 +1592,7 @@ void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
 	deboost = __rt_mutex_futex_unlock(lock, &wake_q);
 	raw_spin_unlock_irq(&lock->wait_lock);
 
-	if (deboost) {
-		wake_up_q(&wake_q);
-		rt_mutex_adjust_prio(current);
-	}
+	rt_mutex_postunlock(&wake_q, deboost);
 }
 
 /**
kernel/locking/rtmutex_common.h (+1 −1)
@@ -122,7 +122,7 @@ extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
 extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
 				 struct wake_q_head *wqh);
 
-extern void rt_mutex_adjust_prio(struct task_struct *task);
+extern void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost);
 
 #ifdef CONFIG_DEBUG_RT_MUTEXES
 # include "rtmutex-debug.h"