
Commit 7b94b70c authored by Peter Zijlstra, committed by Greg Kroah-Hartman

x86/atomic: Fix smp_mb__{before,after}_atomic()



commit 69d927bba39517d0980462efc051875b7f4db185 upstream.

Recent probing at the Linux Kernel Memory Model uncovered a
'surprise'. On strongly ordered architectures such as x86, where the
atomic RmW primitives imply full memory ordering and
smp_mb__{before,after}_atomic() are a simple barrier(), the following
sequence fails:

	*x = 1;
	atomic_inc(u);
	smp_mb__after_atomic();
	r0 = *y;

This is because, while atomic_inc() implies memory ordering, it
(surprisingly) does not provide a compiler barrier. That allows the
compiler to re-order the code like so:

	atomic_inc(u);
	*x = 1;
	smp_mb__after_atomic();
	r0 = *y;

The CPU is then allowed to re-order this (under TSO rules, which
permit a later load to complete before an earlier store to a
different location) like:

	atomic_inc(u);
	r0 = *y;
	*x = 1;

And this very much was not intended. Therefore strengthen the atomic
RmW ops to include a compiler barrier.

NOTE: atomic_{or,and,xor} and the bitops already had the compiler
barrier.
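
For reference, the failing pattern can be written as a store-buffering
litmus test in the herd7 / Linux-kernel memory model "C" format. The
test below is an illustrative sketch added here, not part of the
original patch; the test name and the second thread are my own. If
smp_mb__after_atomic() provides the intended full ordering, the
"exists" clause must never be satisfied:

	C SB+atomic-after-mb

	{}

	P0(int *x, int *y, atomic_t *u)
	{
		int r0;

		WRITE_ONCE(*x, 1);
		atomic_inc(u);
		smp_mb__after_atomic();
		r0 = READ_ONCE(*y);
	}

	P1(int *x, int *y)
	{
		int r1;

		WRITE_ONCE(*y, 1);
		smp_mb();
		r1 = READ_ONCE(*x);
	}

	exists (0:r0=0 /\ 1:r1=0)

Before this fix, the compiler reordering described above could let the
*x = 1 store sink below the load of *y on x86, making the forbidden
outcome observable.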

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Jari Ruusu <jari.ruusu@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 4a4c6175
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -49,7 +49,7 @@ static __always_inline void atomic_add(int i, atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "addl %1,%0"
 		     : "+m" (v->counter)
-		     : "ir" (i));
+		     : "ir" (i) : "memory");
 }
 
 /**
@@ -63,7 +63,7 @@ static __always_inline void atomic_sub(int i, atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "subl %1,%0"
 		     : "+m" (v->counter)
-		     : "ir" (i));
+		     : "ir" (i) : "memory");
 }
 
 /**
@@ -89,7 +89,7 @@ static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
 static __always_inline void atomic_inc(atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "incl %0"
-		     : "+m" (v->counter));
+		     : "+m" (v->counter) :: "memory");
 }
 
 /**
@@ -101,7 +101,7 @@ static __always_inline void atomic_inc(atomic_t *v)
 static __always_inline void atomic_dec(atomic_t *v)
 {
	asm volatile(LOCK_PREFIX "decl %0"
-		     : "+m" (v->counter));
+		     : "+m" (v->counter) :: "memory");
 }
 
 /**
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -44,7 +44,7 @@ static __always_inline void atomic64_add(long i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "addq %1,%0"
 		     : "=m" (v->counter)
-		     : "er" (i), "m" (v->counter));
+		     : "er" (i), "m" (v->counter) : "memory");
 }
 
 /**
@@ -58,7 +58,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "subq %1,%0"
 		     : "=m" (v->counter)
-		     : "er" (i), "m" (v->counter));
+		     : "er" (i), "m" (v->counter) : "memory");
 }
 
 /**
@@ -85,7 +85,7 @@ static __always_inline void atomic64_inc(atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "incq %0"
 		     : "=m" (v->counter)
-		     : "m" (v->counter));
+		     : "m" (v->counter) : "memory");
 }
 
 /**
@@ -98,7 +98,7 @@ static __always_inline void atomic64_dec(atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "decq %0"
 		     : "=m" (v->counter)
-		     : "m" (v->counter));
+		     : "m" (v->counter) : "memory");
 }
 
 /**
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -105,8 +105,8 @@ do { \
 #endif
 
 /* Atomic operations are already serializing on x86 */
-#define __smp_mb__before_atomic()	barrier()
-#define __smp_mb__after_atomic()	barrier()
+#define __smp_mb__before_atomic()	do { } while (0)
+#define __smp_mb__after_atomic()	do { } while (0)
 
 #include <asm-generic/barrier.h>
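
For illustration, the effect of the added "memory" clobber can be
seen outside the kernel as well. Below is a minimal user-space sketch
(all function and variable names are made up; this is not kernel
code): the clobber tells the compiler that the asm may read or write
any memory, so it may neither cache memory values across the
statement nor move surrounding loads and stores past it.

	/* sketch.c -- compile with: gcc -O2 sketch.c  (x86-64 only) */
	#include <stdio.h>

	static int u, x, y;

	/* Without "memory": the compiler may sink the store to x below
	 * the asm, or hoist the load of y above it -- the bug above. */
	static inline void my_atomic_inc_weak(int *v)
	{
		asm volatile("lock incl %0" : "+m" (*v));
	}

	/* With "memory": a compiler barrier as well as (via LOCK) a CPU
	 * barrier, matching the patched kernel atomic_inc(). */
	static inline void my_atomic_inc(int *v)
	{
		asm volatile("lock incl %0" : "+m" (*v) :: "memory");
	}

	int main(void)
	{
		x = 1;			/* cannot sink below the inc... */
		my_atomic_inc(&u);	/* ...thanks to the clobber */
		int r0 = y;		/* cannot hoist above the inc */
		printf("u=%d x=%d r0=%d\n", u, x, r0);
		return 0;
	}

Building main() against my_atomic_inc_weak() instead may let the
compiler rearrange the surrounding accesses at -O2, which is exactly
the reordering this patch forbids.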