
Commit afc5883b authored by Andrea Parri, committed by Greg Kroah-Hartman

locking/xchg/alpha: Fix xchg() and cmpxchg() memory ordering bugs



[ Upstream commit 472e8c55cf6622d1c112dc2bc777f68bbd4189db ]

Successful RMW operations are supposed to be fully ordered, but
Alpha's xchg() and cmpxchg() do not meet this requirement.

Will Deacon noticed the bug:

  > So MP using xchg:
  >
  > WRITE_ONCE(x, 1)
  > xchg(y, 1)
  >
  > smp_load_acquire(y) == 1
  > READ_ONCE(x) == 0
  >
  > would be allowed.

... which thus violates the above requirement.

Fix it by adding a leading smp_mb() to the xchg() and cmpxchg() implementations.
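As an illustration (not part of the patch), the forbidden outcome can be
modelled in userspace C11, where atomic_exchange_explicit() with
memory_order_seq_cst stands in for a fully ordered xchg(); the writer/reader
thread names below are invented for the example:

  /*
   * Minimal userspace C11 sketch of Will's litmus test above;
   * an illustration only, not kernel code.
   */
  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdio.h>

  static atomic_int x, y;
  static int r1, r2;

  static void *writer(void *unused)
  {
  	atomic_store_explicit(&x, 1, memory_order_relaxed);	/* WRITE_ONCE(x, 1) */
  	atomic_exchange_explicit(&y, 1, memory_order_seq_cst);	/* xchg(y, 1) */
  	return NULL;
  }

  static void *reader(void *unused)
  {
  	r1 = atomic_load_explicit(&y, memory_order_acquire);	/* smp_load_acquire(y) */
  	r2 = atomic_load_explicit(&x, memory_order_relaxed);	/* READ_ONCE(x) */
  	return NULL;
  }

  int main(void)
  {
  	pthread_t t0, t1;

  	pthread_create(&t0, NULL, writer, NULL);
  	pthread_create(&t1, NULL, reader, NULL);
  	pthread_join(t0, NULL);
  	pthread_join(t1, NULL);

  	/* With a fully ordered exchange, r1 == 1 && r2 == 0 is forbidden. */
  	printf("r1=%d r2=%d\n", r1, r2);
  	return 0;
  }

On the buggy Alpha implementation, the store half of xchg() could become
visible before the preceding WRITE_ONCE(), allowing exactly the
r1 == 1 && r2 == 0 outcome quoted above.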

Reported-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrea Parri <parri.andrea@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-alpha@vger.kernel.org
Link: http://lkml.kernel.org/r/1519291488-5752-1-git-send-email-parri.andrea@gmail.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 8a5a436a
arch/alpha/include/asm/xchg.h  +18 −3
@@ -11,6 +11,10 @@
  * Atomic exchange.
  * Since it can be used to implement critical sections
  * it must clobber "memory" (also for interrupts in UP).
+ *
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
  */
 
 static inline unsigned long
@@ -18,6 +22,7 @@ ____xchg(_u8, volatile char *m, unsigned long val)
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	insbl	%1,%4,%1\n"
@@ -42,6 +47,7 @@ ____xchg(_u16, volatile short *m, unsigned long val)
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	inswl	%1,%4,%1\n"
@@ -66,6 +72,7 @@ ____xchg(_u32, volatile int *m, unsigned long val)
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%4\n"
 	"	bis $31,%3,%1\n"
@@ -86,6 +93,7 @@ ____xchg(_u64, volatile long *m, unsigned long val)
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%4\n"
 	"	bis $31,%3,%1\n"
@@ -127,9 +135,12 @@ ____xchg(, volatile void *ptr, unsigned long x, int size)
  * store NEW in MEM.  Return the initial value in MEM.  Success is
  * indicated by comparing RETURN with OLD.
  *
- * The memory barrier is placed in SMP unconditionally, in order to
- * guarantee that dependency ordering is preserved when a dependency
- * is headed by an unsuccessful operation.
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
+ * The trailing memory barrier is placed in SMP unconditionally, in
+ * order to guarantee that dependency ordering is preserved when a
+ * dependency is headed by an unsuccessful operation.
  */
 
 static inline unsigned long
@@ -137,6 +148,7 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	insbl	%1,%5,%1\n"
@@ -164,6 +176,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	inswl	%1,%5,%1\n"
@@ -191,6 +204,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
 {
 	unsigned long prev, cmp;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%5\n"
 	"	cmpeq %0,%3,%1\n"
@@ -214,6 +228,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
 {
 	unsigned long prev, cmp;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%5\n"
 	"	cmpeq %0,%3,%1\n"