
Commit 952111d7 authored by Paul E. McKenney

arch: Remove spin_unlock_wait() arch-specific definitions

There is no agreed-upon definition of spin_unlock_wait()'s semantics,
and it appears that all callers could do just as well with a lock/unlock
pair.  This commit therefore removes the underlying arch-specific
arch_spin_unlock_wait() for all architectures providing them.
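
For illustration only (not part of this patch), the substitution callers
can use: where code previously waited for a lock to be released with
spin_unlock_wait(), an acquire/release pair of the same lock is at least
as strong and has well-defined semantics.  The lock name foo_lock and
the helper below are made up for this sketch, not taken from the kernel:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(foo_lock);	/* hypothetical lock */

	static void foo_wait_for_owner(void)
	{
		/* Previously: spin_unlock_wait(&foo_lock); */

		/* Now: briefly acquire and release the lock instead. */
		spin_lock(&foo_lock);
		spin_unlock(&foo_lock);
	}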

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: <linux-arch@vger.kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Andrea Parri <parri.andrea@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Acked-by: Boqun Feng <boqun.feng@gmail.com>
parent d3a024ab
arch/alpha/include/asm/spinlock.h  +0 −5
@@ -16,11 +16,6 @@
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 #define arch_spin_is_locked(x)	((x)->lock != 0)
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
         return lock.lock == 0;
arch/arc/include/asm/spinlock.h  +0 −5
@@ -16,11 +16,6 @@
 #define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
 #define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	smp_cond_load_acquire(&lock->slock, !VAL);
-}
-
 #ifdef CONFIG_ARC_HAS_LLSC
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
arch/arm/include/asm/spinlock.h  +0 −16
@@ -52,22 +52,6 @@ static inline void dsb_sev(void)
  * memory.
  */
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	u16 owner = READ_ONCE(lock->tickets.owner);
-
-	for (;;) {
-		arch_spinlock_t tmp = READ_ONCE(*lock);
-
-		if (tmp.tickets.owner == tmp.tickets.next ||
-		    tmp.tickets.owner != owner)
-			break;
-
-		wfe();
-	}
-	smp_acquire__after_ctrl_dep();
-}
-
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
arch/arm64/include/asm/spinlock.h  +5 −53
@@ -26,58 +26,6 @@
  * The memory barriers are implicit with the load-acquire and store-release
  * instructions.
  */
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	unsigned int tmp;
-	arch_spinlock_t lockval;
-	u32 owner;
-
-	/*
-	 * Ensure prior spin_lock operations to other locks have completed
-	 * on this CPU before we test whether "lock" is locked.
-	 */
-	smp_mb();
-	owner = READ_ONCE(lock->owner) << 16;
-
-	asm volatile(
-"	sevl\n"
-"1:	wfe\n"
-"2:	ldaxr	%w0, %2\n"
-	/* Is the lock free? */
-"	eor	%w1, %w0, %w0, ror #16\n"
-"	cbz	%w1, 3f\n"
-	/* Lock taken -- has there been a subsequent unlock->lock transition? */
-"	eor	%w1, %w3, %w0, lsl #16\n"
-"	cbz	%w1, 1b\n"
-	/*
-	 * The owner has been updated, so there was an unlock->lock
-	 * transition that we missed. That means we can rely on the
-	 * store-release of the unlock operation paired with the
-	 * load-acquire of the lock operation to publish any of our
-	 * previous stores to the new lock owner and therefore don't
-	 * need to bother with the writeback below.
-	 */
-"	b	4f\n"
-"3:\n"
-	/*
-	 * Serialise against any concurrent lockers by writing back the
-	 * unlocked lock value
-	 */
-	ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-"	stxr	%w1, %w0, %2\n"
-	__nops(2),
-	/* LSE atomics */
-"	mov	%w1, %w0\n"
-"	cas	%w0, %w0, %2\n"
-"	eor	%w1, %w1, %w0\n")
-	/* Somebody else wrote to the lock, GOTO 10 and reload the value */
-"	cbnz	%w1, 2b\n"
-"4:"
-	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
-	: "r" (owner)
-	: "memory");
-}
-
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
@@ -176,7 +124,11 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	smp_mb(); /* See arch_spin_unlock_wait */
+	/*
+	 * Ensure prior spin_lock operations to other locks have completed
+	 * on this CPU before we test whether "lock" is locked.
+	 */
+	smp_mb(); /* ^^^ */
 	return !arch_spin_value_unlocked(READ_ONCE(*lock));
 }
 
arch/blackfin/include/asm/spinlock.h  +0 −5
@@ -48,11 +48,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 	__raw_spin_unlock_asm(&lock->lock);
 }
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
 static inline int arch_read_can_lock(arch_rwlock_t *rw)
 {
 	return __raw_uncached_fetch_asm(&rw->lock) > 0;