Commit 726328d9 authored by Peter Zijlstra, committed by Ingo Molnar

locking/spinlock, arch: Update and fix spin_unlock_wait() implementations



This patch updates/fixes all spin_unlock_wait() implementations.

The update is in the semantics: where spin_unlock_wait() previously
provided only a control dependency, we now upgrade it to a full
load-acquire to match the store-release from the spin_unlock() we
waited on. This ensures that when spin_unlock_wait() returns, we are
guaranteed to observe the full critical section we waited on.
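
As a minimal illustration of that pairing (user-space C11 atomics, not
kernel code; the names lock, data, unlock() and unlock_wait() are made
up for the example): an acquire load that observes the unlock's release
store is guaranteed to also observe every store made inside the
critical section.

#include <stdatomic.h>

static atomic_int lock = 1;	/* 1 = held, 0 = free */
static int data;		/* protected by the lock */

/* Lock holder: write inside the critical section, then release. */
void unlock(void)
{
	data = 42;	/* critical-section store */
	atomic_store_explicit(&lock, 0, memory_order_release);
}

/* Waiter: spin with an acquire load until the lock reads free.
 * Because the acquire pairs with the release above, data == 42 is
 * guaranteed to be visible once this returns; a control dependency
 * alone would not guarantee that on weakly ordered hardware.
 */
void unlock_wait(void)
{
	while (atomic_load_explicit(&lock, memory_order_acquire))
		;
}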

This fixes a number of spin_unlock_wait() users that (not
unreasonably) rely on this guarantee.

I also fixed a number of ticket lock versions to only wait on the
current lock holder, instead of waiting for a full unlock, as this is
sufficient.
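
In portable C11 terms, the idea looks roughly like the sketch below
(the ticket_lock type and its field names are hypothetical, paraphrasing
the ARM hunk in the diff further down): record who holds the lock on
entry, then stop as soon as the lock is free or that particular holder
has moved on.

#include <stdatomic.h>

/* Hypothetical ticket lock: 'owner' is the ticket being served,
 * 'next' the next ticket to hand out; the lock is free when
 * owner == next.
 */
struct ticket_lock {
	_Atomic unsigned short owner;
	_Atomic unsigned short next;
};

static void ticket_unlock_wait(struct ticket_lock *lock)
{
	/* ticket being served when we arrived */
	unsigned short entry_owner =
		atomic_load_explicit(&lock->owner, memory_order_relaxed);

	for (;;) {
		unsigned short owner =
			atomic_load_explicit(&lock->owner, memory_order_relaxed);
		unsigned short next =
			atomic_load_explicit(&lock->next, memory_order_relaxed);

		/* done if the lock is free, or if the entry-time holder
		 * has released it (even if another CPU took it since) */
		if (owner == next || owner != entry_owner)
			break;
	}

	/* acquire fence so the waited-on critical section is visible,
	 * akin to the kernel's smp_acquire__after_ctrl_dep() */
	atomic_thread_fence(memory_order_acquire);
}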

Furthermore, again for the ticket locks, I added an smp_rmb() between
the initial ticket load and the spin loop testing the current value,
because I could not convince myself the address dependency is
sufficient, especially if the loads are of different sizes.

I'm more than happy to remove this smp_rmb() again if people are
certain the address dependency does indeed work as expected.
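
In terms of the sketch above, the barrier in question sits between the
initial owner load and the loop (illustrative only, reusing the
hypothetical struct ticket_lock; a C11 acquire fence serves as a
portable stand-in for smp_rmb()):

static void ticket_unlock_wait_fenced(struct ticket_lock *lock)
{
	unsigned short entry_owner =
		atomic_load_explicit(&lock->owner, memory_order_relaxed);

	/* Stand-in for the smp_rmb() in question: explicitly order the
	 * entry_owner load before the loop's re-loads of the lock,
	 * rather than relying on an address dependency between loads
	 * of different sizes from the same lock word.
	 */
	atomic_thread_fence(memory_order_acquire);

	for (;;) {
		unsigned short owner =
			atomic_load_explicit(&lock->owner, memory_order_relaxed);
		unsigned short next =
			atomic_load_explicit(&lock->next, memory_order_relaxed);

		if (owner == next || owner != entry_owner)
			break;
	}

	atomic_thread_fence(memory_order_acquire);
}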

Note: PPC32 will be fixed independently.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: chris@zankel.net
Cc: cmetcalf@mellanox.com
Cc: davem@davemloft.net
Cc: dhowells@redhat.com
Cc: james.hogan@imgtec.com
Cc: jejb@parisc-linux.org
Cc: linux@armlinux.org.uk
Cc: mpe@ellerman.id.au
Cc: ralf@linux-mips.org
Cc: realmz6@gmail.com
Cc: rkuo@codeaurora.org
Cc: rth@twiddle.net
Cc: schwidefsky@de.ibm.com
Cc: tony.luck@intel.com
Cc: vgupta@synopsys.com
Cc: ysato@users.sourceforge.jp
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent b464d127
arch/alpha/include/asm/spinlock.h  +7 −2
@@ -3,6 +3,8 @@
 
 #include <linux/kernel.h>
 #include <asm/current.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
 
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
@@ -13,8 +15,11 @@
 
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 #define arch_spin_is_locked(x)	((x)->lock != 0)
-#define arch_spin_unlock_wait(x) \
-		do { cpu_relax(); } while ((x)->lock)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->lock, !VAL);
+}
 
 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
arch/arc/include/asm/spinlock.h  +5 −2
@@ -15,8 +15,11 @@
 
 #define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
 #define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
-#define arch_spin_unlock_wait(x) \
-	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->slock, !VAL);
+}
 
 #ifdef CONFIG_ARC_HAS_LLSC
 
arch/arm/include/asm/spinlock.h  +17 −2
@@ -6,6 +6,8 @@
 #endif
 
 #include <linux/prefetch.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
 
 /*
  * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
@@ -50,8 +52,21 @@ static inline void dsb_sev(void)
  * memory.
  */
 
-#define arch_spin_unlock_wait(lock) \
-	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	u16 owner = READ_ONCE(lock->tickets.owner);
+
+	for (;;) {
+		arch_spinlock_t tmp = READ_ONCE(*lock);
+
+		if (tmp.tickets.owner == tmp.tickets.next ||
+		    tmp.tickets.owner != owner)
+			break;
+
+		wfe();
+	}
+	smp_acquire__after_ctrl_dep();
+}
 
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

arch/blackfin/include/asm/spinlock.h  +3 −2
@@ -12,6 +12,8 @@
 #else
 
 #include <linux/atomic.h>
+#include <asm/processor.h>
+#include <asm/barrier.h>
 
 asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
 asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
@@ -48,8 +50,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	while (arch_spin_is_locked(lock))
-		cpu_relax();
+	smp_cond_load_acquire(&lock->lock, !VAL);
 }
 
 static inline int arch_read_can_lock(arch_rwlock_t *rw)
arch/hexagon/include/asm/spinlock.h  +8 −2
@@ -23,6 +23,8 @@
 #define _ASM_SPINLOCK_H
 
 #include <asm/irqflags.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
 
 /*
  * This file is pulled in for SMP builds.
@@ -176,8 +178,12 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
  * SMP spinlocks are intended to allow only a single CPU at the lock
  */
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(lock) \
-	do {while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->lock, !VAL);
+}
+
 #define arch_spin_is_locked(x) ((x)->lock != 0)
 
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)