
Commit ae0b5c2f authored by Ingo Molnar

Merge branch 'locking/urgent' into locking/core, to pick up dependency



Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 331b6d8c 2c610022
+17 −36
@@ -21,38 +21,34 @@

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
 * @lock : Pointer to queued spinlock structure
 *
 * There is a very slight possibility of live-lock if the lockers keep coming
 * and the waiter is just unfortunate enough to not see any unlock state.
 */
#ifndef queued_spin_unlock_wait
extern void queued_spin_unlock_wait(struct qspinlock *lock);
#endif

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
#ifndef queued_spin_is_locked
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * queued_spin_lock_slowpath() can ACQUIRE the lock before
	 * issuing the unordered store that sets _Q_LOCKED_VAL.
	 *
	 * See both smp_cond_acquire() sites for more detail.
	 *
	 * This however means that in code like:
	 *
	 *   spin_lock(A)		spin_lock(B)
	 *   spin_unlock_wait(B)	spin_is_locked(A)
	 *   do_something()		do_something()
	 *
	 * Both CPUs can end up running do_something() because the store
	 * setting _Q_LOCKED_VAL will pass through the loads in
	 * spin_unlock_wait() and/or spin_is_locked().
	 * See queued_spin_unlock_wait().
	 *
	 * Avoid this by issuing a full memory barrier between the spin_lock()
	 * and the loads in spin_unlock_wait() and spin_is_locked().
	 *
	 * Note that regular mutual exclusion doesn't care about this
	 * delayed store.
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	smp_mb();
	return atomic_read(&lock->val) & _Q_LOCKED_MASK;
	return atomic_read(&lock->val);
}
#endif
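
To make the scenario in the comment above concrete, here is a minimal, illustrative sketch of the pattern it describes; the locks A and B and the helpers do_something()/back_off() are hypothetical and not part of this commit. The point is the one the comment makes: the store that sets _Q_LOCKED_VAL can be delayed, so each CPU needs a full barrier between taking its own lock and sampling the other lock, or both CPUs could end up running do_something() at the same time.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(A);
static DEFINE_SPINLOCK(B);

extern void do_something(void);	/* hypothetical critical work */
extern void back_off(void);	/* hypothetical fallback path */

static void cpu0(void)
{
	spin_lock(&A);
	/*
	 * Without a full barrier between the lock store above and the loads
	 * below, CPU0 could miss CPU1's lock of B (and vice versa).
	 */
	spin_unlock_wait(&B);
	do_something();
	spin_unlock(&A);
}

static void cpu1(void)
{
	spin_lock(&B);
	/*
	 * queued_spin_is_locked() issues smp_mb() before its load, so at
	 * least one of the two CPUs is guaranteed to observe the other's
	 * lock; they cannot both see "unlocked" and both proceed.
	 */
	if (!spin_is_locked(&A))
		do_something();
	else
		back_off();
	spin_unlock(&B);
}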

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -122,21 +118,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
}
#endif

/**
 * queued_spin_unlock_wait - wait until current lock holder releases the lock
 * @lock : Pointer to queued spinlock structure
 *
 * There is a very slight possibility of live-lock if the lockers keep coming
 * and the waiter is just unfortunate enough to not see any unlock state.
 */
static inline void queued_spin_unlock_wait(struct qspinlock *lock)
{
	/* See queued_spin_is_locked() */
	smp_mb();
	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
		cpu_relax();
}

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
+60 −0
@@ -267,6 +267,66 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
#define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
#endif

/*
 * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
 * issuing an _unordered_ store to set _Q_LOCKED_VAL.
 *
 * This means that the store can be delayed, but no later than the
 * store-release from the unlock. This means that simply observing
 * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
 *
 * There are two paths that can issue the unordered store:
 *
 *  (1) clear_pending_set_locked():	*,1,0 -> *,0,1
 *
 *  (2) set_locked():			t,0,0 -> t,0,1 ; t != 0
 *      atomic_cmpxchg_relaxed():	t,0,0 -> 0,0,1
 *
 * However, in both cases we have other !0 state we've set before to queue
 * ourselves:
 *
 * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our
 * load is constrained by that ACQUIRE to not pass before that, and thus must
 * observe the store.
 *
 * For (2) we have a more interesting scenario. We enqueue ourselves using
 * xchg_tail(), which ends up being a RELEASE. This in itself is not
 * sufficient, however that is followed by an smp_cond_acquire() on the same
 * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
 * guarantees we must observe that store.
 *
 * Therefore both cases have other !0 state that is observable before the
 * unordered locked byte store comes through. This means we can use that to
 * wait for the lock store, and then wait for an unlock.
 */
#ifndef queued_spin_unlock_wait
void queued_spin_unlock_wait(struct qspinlock *lock)
{
	u32 val;

	for (;;) {
		val = atomic_read(&lock->val);

		if (!val) /* not locked, we're done */
			goto done;

		if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
			break;

		/* not locked, but pending, wait until we observe the lock */
		cpu_relax();
	}

	/* any unlock is good */
	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
		cpu_relax();

done:
	smp_rmb(); /* CTRL + RMB -> ACQUIRE */
}
EXPORT_SYMBOL(queued_spin_unlock_wait);
#endif
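
For context, here is a minimal, illustrative sketch of the kind of pattern spin_unlock_wait(), which on qspinlock-based configurations ends up in the queued_spin_unlock_wait() added above, is used for: unpublish an object so no new locker can reach it, then wait for the current lock holder, if any, to finish before freeing it. The struct foo type, the registry_lock/live_foo publication scheme and the function names are assumptions for illustration, not code from this commit.

#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	spinlock_t lock;
	/* ... payload ... */
};

static DEFINE_SPINLOCK(registry_lock);
static struct foo *live_foo;		/* published pointer, protected by registry_lock */

static void foo_use(void)
{
	struct foo *f;

	spin_lock(&registry_lock);
	f = live_foo;
	if (f)
		spin_lock(&f->lock);	/* handoff: taken while f is still published */
	spin_unlock(&registry_lock);

	if (!f)
		return;
	/* ... use f ... */
	spin_unlock(&f->lock);
}

static void foo_destroy(void)
{
	struct foo *f;

	spin_lock(&registry_lock);
	f = live_foo;
	live_foo = NULL;		/* unpublish: no new CPU can start locking f */
	spin_unlock(&registry_lock);

	if (!f)
		return;
	/*
	 * Drain the last holder, if any.  Once this returns, every critical
	 * section that could still touch f has completed (the smp_rmb() at
	 * the 'done' label gives the required ACQUIRE ordering), so the
	 * object can be freed.
	 */
	spin_unlock_wait(&f->lock);
	kfree(f);
}

The "pending but not yet locked" loop in queued_spin_unlock_wait() matters for exactly this kind of caller: a locker that has already made itself visible (pending or queued) must not be missed just because its _Q_LOCKED_VAL store has not landed yet.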

#endif /* _GEN_PV_LOCK_SLOWPATH */

/**