
Commit 656e7c0c authored by Paul E. McKenney

Merge branches 'doc.2017.08.17a', 'fixes.2017.08.17a', 'hotplug.2017.07.25b', 'misc.2017.08.17a', 'spin_unlock_wait_no.2017.08.17a', 'srcu.2017.07.27c' and 'torture.2017.07.24c' into HEAD

doc.2017.08.17a: Documentation updates.
fixes.2017.08.17a: RCU fixes.
hotplug.2017.07.25b: CPU-hotplug updates.
misc.2017.08.17a: Miscellaneous fixes outside of RCU (give or take conflicts).
spin_unlock_wait_no.2017.08.17a: Remove spin_unlock_wait().
srcu.2017.07.27c: SRCU updates.
torture.2017.07.24c: Torture-test updates.
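
The spin_unlock_wait_no branch removes spin_unlock_wait() and the per-architecture arch_spin_unlock_wait() implementations shown in the diffs below. As a rough sketch of the conversion pattern used for callers elsewhere in that series (the names my_lock and my_drain are hypothetical, not taken from this merge), code that only needed to wait for the current lock holder to leave its critical section can take and immediately release the lock instead:

/*
 * Illustrative only: a lock/unlock pair provides at least as much
 * ordering as the old "spin until unlocked" idiom did.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);

static void my_drain(void)
{
	/* Formerly: spin_unlock_wait(&my_lock); */
	spin_lock(&my_lock);	/* blocks until any current holder releases the lock */
	spin_unlock(&my_lock);	/* then drop it again; we only wanted the wait */
}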
+1 −1
@@ -8621,7 +8621,7 @@ M:	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 L:	linux-kernel@vger.kernel.org
 S:	Supported
-F:	kernel/membarrier.c
+F:	kernel/sched/membarrier.c
 F:	include/uapi/linux/membarrier.h
 
 MEMORY MANAGEMENT
+0 −5
@@ -16,11 +16,6 @@
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 #define arch_spin_is_locked(x)	((x)->lock != 0)
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
 	return lock.lock == 0;
+0 −5
@@ -16,11 +16,6 @@
 #define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
 #define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	smp_cond_load_acquire(&lock->slock, !VAL);
-}
-
 #ifdef CONFIG_ARC_HAS_LLSC
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
+0 −16
@@ -52,22 +52,6 @@ static inline void dsb_sev(void)
  * memory.
  */
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	u16 owner = READ_ONCE(lock->tickets.owner);
-
-	for (;;) {
-		arch_spinlock_t tmp = READ_ONCE(*lock);
-
-		if (tmp.tickets.owner == tmp.tickets.next ||
-		    tmp.tickets.owner != owner)
-			break;
-
-		wfe();
-	}
-	smp_acquire__after_ctrl_dep();
-}
-
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
+5 −53
@@ -26,58 +26,6 @@
  * The memory barriers are implicit with the load-acquire and store-release
  * instructions.
  */
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	unsigned int tmp;
-	arch_spinlock_t lockval;
-	u32 owner;
-
-	/*
-	 * Ensure prior spin_lock operations to other locks have completed
-	 * on this CPU before we test whether "lock" is locked.
-	 */
-	smp_mb();
-	owner = READ_ONCE(lock->owner) << 16;
-
-	asm volatile(
-"	sevl\n"
-"1:	wfe\n"
-"2:	ldaxr	%w0, %2\n"
-	/* Is the lock free? */
-"	eor	%w1, %w0, %w0, ror #16\n"
-"	cbz	%w1, 3f\n"
-	/* Lock taken -- has there been a subsequent unlock->lock transition? */
-"	eor	%w1, %w3, %w0, lsl #16\n"
-"	cbz	%w1, 1b\n"
-	/*
-	 * The owner has been updated, so there was an unlock->lock
-	 * transition that we missed. That means we can rely on the
-	 * store-release of the unlock operation paired with the
-	 * load-acquire of the lock operation to publish any of our
-	 * previous stores to the new lock owner and therefore don't
-	 * need to bother with the writeback below.
-	 */
-"	b	4f\n"
-"3:\n"
-	/*
-	 * Serialise against any concurrent lockers by writing back the
-	 * unlocked lock value
-	 */
-	ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-"	stxr	%w1, %w0, %2\n"
-	__nops(2),
-	/* LSE atomics */
-"	mov	%w1, %w0\n"
-"	cas	%w0, %w0, %2\n"
-"	eor	%w1, %w1, %w0\n")
-	/* Somebody else wrote to the lock, GOTO 10 and reload the value */
-"	cbnz	%w1, 2b\n"
-"4:"
-	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
-	: "r" (owner)
-	: "memory");
-}
-
 
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

@@ -176,7 +124,11 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	smp_mb(); /* See arch_spin_unlock_wait */
+	/*
+	 * Ensure prior spin_lock operations to other locks have completed
+	 * on this CPU before we test whether "lock" is locked.
+	 */
+	smp_mb(); /* ^^^ */
 	return !arch_spin_value_unlocked(READ_ONCE(*lock));
 }
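
The comment added above spells out why arch_spin_is_locked() keeps a full barrier. A minimal sketch of the kind of cross-lock handshake that barrier supports (the locks lock_a and lock_b and the function below are assumptions for illustration, not part of this diff): each CPU takes its own lock and then tests the peer's, and the smp_mb() inside spin_is_locked() keeps that test from being ordered before this CPU's own lock acquisition becomes visible, so the two sides cannot both observe the other lock as free.

/*
 * Hypothetical handshake: CPU0 runs cpu0_side() while CPU1 runs the
 * mirror-image code with lock_b and lock_a swapped.  The full barrier
 * in the arm64 arch_spin_is_locked() (the smp_mb() in the hunk above)
 * forbids the store-buffering outcome in which both CPUs see the
 * peer's lock as unlocked and race into the guarded work.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

static void cpu0_side(void)
{
	spin_lock(&lock_a);
	if (!spin_is_locked(&lock_b)) {
		/* The peer has not yet entered its critical section. */
	}
	spin_unlock(&lock_a);
}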
