
Commit 0cd64efb authored by Peter Zijlstra, committed by Ingo Molnar

arch,ia64: Convert smp_mb__*()



ia64 atomic ops are full barriers; implement the new
smp_mb__{before,after}_atomic().

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-hyp7yj68cmqz1nqbfpr541ca@git.kernel.org


Cc: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-ia64@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 94cf42f8
arch/ia64/include/asm/atomic.h  +1 −6
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 
 #include <asm/intrinsics.h>
+#include <asm/barrier.h>
 
 
 #define ATOMIC_INIT(i)		{ (i) }
@@ -208,10 +209,4 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 #define atomic64_inc(v)			atomic64_add(1, (v))
 #define atomic64_dec(v)			atomic64_sub(1, (v))
 
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
 #endif /* _ASM_IA64_ATOMIC_H */
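
The hunk above removes the old ia64 smp_mb__before/after_atomic_{dec,inc}() macros in favour of the new generic names. As a rough caller-side sketch (not part of this commit; finish_work(), pending and flag are invented names for illustration), converted code would look like this, with both the old and new barriers still expanding to a plain barrier() on ia64 because the atomic read-modify-write is itself a full memory barrier:

/*
 * Caller-side sketch (hypothetical names, not from this commit).
 */
#include <linux/atomic.h>

static atomic_t pending = ATOMIC_INIT(1);
static int flag;

static void finish_work(void)
{
	flag = 1;			/* plain store */

	/* was: smp_mb__before_atomic_dec(); */
	smp_mb__before_atomic();	/* order the store before the atomic op */
	atomic_dec(&pending);		/* atomic read-modify-write */
	/* was: smp_mb__after_atomic_dec(); */
	smp_mb__after_atomic();		/* order the atomic op before later accesses */
}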
arch/ia64/include/asm/barrier.h  +3 −0
@@ -55,6 +55,9 @@
 
 #endif
 
+#define smp_mb__before_atomic()	barrier()
+#define smp_mb__after_atomic()	barrier()
+
 /*
  * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
  * need for asm trickery!
arch/ia64/include/asm/bitops.h  +2 −4
@@ -16,6 +16,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <asm/intrinsics.h>
+#include <asm/barrier.h>
 
 /**
  * set_bit - Atomically set a bit in memory
@@ -65,9 +66,6 @@ __set_bit (int nr, volatile void *addr)
 	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
 }
 
-#define smp_mb__before_clear_bit()	barrier();
-#define smp_mb__after_clear_bit()	barrier();
-
 /**
  * clear_bit - Clears a bit in memory
  * @nr: Bit to clear
@@ -75,7 +73,7 @@ __set_bit (int nr, volatile void *addr)
  *
  * clear_bit() is atomic and may not be reordered.  However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static __inline__ void
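
The updated clear_bit() comment above points callers at smp_mb__before_atomic()/smp_mb__after_atomic(). A hedged usage sketch (not from this commit; MY_BUSY_BIT, my_flags and my_release() are invented names) of clear_bit() used as an unlock-style operation:

/*
 * Usage sketch for the comment above (hypothetical names).  clear_bit()
 * is atomic but not a memory barrier, so an unlock-style user adds
 * smp_mb__before_atomic() to make earlier stores visible before the bit
 * can be observed clear.
 */
#include <linux/bitops.h>
#include <linux/atomic.h>

#define MY_BUSY_BIT	0	/* invented bit number */

static unsigned long my_flags;

static void my_release(void)
{
	smp_mb__before_atomic();	/* order prior stores before the clear */
	clear_bit(MY_BUSY_BIT, &my_flags);
}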