
Commit 293666b7 authored by David S. Miller

sparc64: Stop using memory barriers for atomics and locks.



The kernel always executes in the TSO memory model now,
so none of this stuff is necessary any more.

With helpful feedback from Nick Piggin.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent 64f2dde3
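
Note (illustrative, not part of the commit): under TSO the only hardware reordering still possible is Store->Load, so after this change rmb() and wmb() are pure compiler barriers and only mb()/smp_mb() still emits an instruction, membar #StoreLoad. A minimal message-passing sketch of what that leaves to guarantee (producer()/consumer() and the flag/data pair are hypothetical):

static int data;
static int flag;

static void producer(void)
{
	data = 42;	/* payload store */
	smp_wmb();	/* compiler barrier only on sparc64 now;
			 * TSO already orders the two stores */
	flag = 1;	/* publish */
}

static void consumer(void)
{
	while (!flag)	/* cpu_relax() also acts as a compiler barrier */
		cpu_relax();
	smp_rmb();	/* compiler barrier only; TSO orders the two loads */
	BUG_ON(data != 42);
}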
+0 −7
@@ -112,17 +112,10 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

/* Atomic operations are already serializing */
#ifdef CONFIG_SMP
#define smp_mb__before_atomic_dec()	membar_storeload_loadload();
#define smp_mb__after_atomic_dec()	membar_storeload_storestore();
#define smp_mb__before_atomic_inc()	membar_storeload_loadload();
#define smp_mb__after_atomic_inc()	membar_storeload_storestore();
#else
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
#endif

#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC64_ATOMIC__) */
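
Note (illustrative, not part of the commit): with the CONFIG_SMP branch above gone, smp_mb__before_atomic_dec() and friends are plain barrier() on SMP as well, on the premise stated in the comment that the cas/casx based atomics already serialize under TSO. A hypothetical caller is unchanged; it just no longer gets a membar:

	/* 'work' and its 'pending' counter are illustrative only */
	smp_mb__before_atomic_dec();	/* expands to barrier() on sparc64 */
	atomic_dec(&work->pending);
	smp_mb__after_atomic_dec();	/* likewise just barrier() */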
+0 −5
@@ -23,13 +23,8 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);

#include <asm-generic/bitops/non-atomic.h>

#ifdef CONFIG_SMP
#define smp_mb__before_clear_bit()	membar_storeload_loadload()
#define smp_mb__after_clear_bit()	membar_storeload_storestore()
#else
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
#endif

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
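
Note (illustrative, not part of the commit): smp_mb__after_clear_bit() likewise collapses to barrier() in both configurations. The common unlock-and-wake pattern reads the same as before; it simply stops emitting a membar on sparc64 (BIT_NR and 'word' are illustrative):

	clear_bit(BIT_NR, &word);
	smp_mb__after_clear_bit();	/* compiler barrier only now */
	wake_up_bit(&word, BIT_NR);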
+0 −14
@@ -33,12 +33,10 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldub		[%1], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 1b\n"
@@ -54,7 +52,6 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)

	__asm__ __volatile__(
"	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore"
	: "=r" (result)
	: "r" (lock)
	: "memory");
@@ -65,7 +62,6 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#StoreStore | #LoadStore\n"
"	stb		%%g0, [%0]"
	: /* No outputs */
	: "r" (lock)
@@ -78,14 +74,12 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla

	__asm__ __volatile__(
"1:	ldstub		[%2], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	rdpr		%%pil, %1\n"
"	wrpr		%3, %%pil\n"
"3:	ldub		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 3b\n"
"	 nop\n"
"	ba,pt		%%xcc, 1b\n"
@@ -108,12 +102,10 @@ static void inline __read_lock(raw_rwlock_t *lock)
"4:	 add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldsw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brlz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
@@ -134,7 +126,6 @@ static int inline __read_trylock(raw_rwlock_t *lock)
"	add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 mov		1, %0\n"
"2:"
@@ -150,7 +141,6 @@ static void inline __read_unlock(raw_rwlock_t *lock)
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar	#StoreLoad | #LoadLoad\n"
"1:	lduw	[%2], %0\n"
"	sub	%0, 1, %1\n"
"	cas	[%2], %0, %1\n"
@@ -174,12 +164,10 @@ static void inline __write_lock(raw_rwlock_t *lock)
"4:	 or		%0, %3, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	lduw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
@@ -192,7 +180,6 @@ static void inline __write_lock(raw_rwlock_t *lock)
static void inline __write_unlock(raw_rwlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#LoadStore | #StoreStore\n"
"	stw		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
@@ -212,7 +199,6 @@ static int inline __write_trylock(raw_rwlock_t *lock)
"	 or		%0, %4, %1\n"
"	cas		[%3], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	mov		1, %2\n"
+8 −27
@@ -59,20 +59,9 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
			     : : : "memory"); \
} while (0)

#define mb()	\
	membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
#define rmb()	\
	membar_safe("#LoadLoad")
#define wmb()	\
	membar_safe("#StoreStore")
#define membar_storeload() \
	membar_safe("#StoreLoad")
#define membar_storeload_storestore() \
	membar_safe("#StoreLoad | #StoreStore")
#define membar_storeload_loadload() \
	membar_safe("#StoreLoad | #LoadLoad")
#define membar_storestore_loadstore() \
	membar_safe("#StoreStore | #LoadStore")
#define mb()	membar_safe("#StoreLoad")
#define rmb()	__asm__ __volatile__("":::"memory")
#define wmb()	__asm__ __volatile__("":::"memory")

#endif

@@ -80,20 +69,20 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \

#define read_barrier_depends()		do { } while(0)
#define set_mb(__var, __value) \
	do { __var = __value; membar_storeload_storestore(); } while(0)
	do { __var = __value; membar_safe("#StoreLoad"); } while(0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	__asm__ __volatile__("":::"memory")
#define smp_rmb()	__asm__ __volatile__("":::"memory")
#define smp_wmb()	__asm__ __volatile__("":::"memory")
#define smp_read_barrier_depends()	do { } while(0)
#endif

#define smp_read_barrier_depends()	do { } while(0)

#define flushi(addr)	__asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")

#define flushw_all()	__asm__ __volatile__("flushw")
@@ -209,14 +198,12 @@ static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int va
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar		#StoreLoad | #LoadLoad\n"
"	mov		%0, %1\n"
"1:	lduw		[%4], %2\n"
"	cas		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%icc, 1b\n"
"	 mov		%1, %0\n"
"	membar		#StoreLoad | #StoreStore\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
@@ -228,14 +215,12 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar		#StoreLoad | #LoadLoad\n"
"	mov		%0, %1\n"
"1:	ldx		[%4], %2\n"
"	casx		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%xcc, 1b\n"
"	 mov		%1, %0\n"
"	membar		#StoreLoad | #StoreStore\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
@@ -272,9 +257,7 @@ extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noret
static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	__asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
			     "cas [%2], %3, %0\n\t"
			     "membar #StoreLoad | #StoreStore"
	__asm__ __volatile__("cas [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");
@@ -285,9 +268,7 @@ __cmpxchg_u32(volatile int *m, int old, int new)
static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	__asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
			     "casx [%2], %3, %0\n\t"
			     "membar #StoreLoad | #StoreStore"
	__asm__ __volatile__("casx [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");
+0 −6
@@ -50,8 +50,6 @@
#define TSB_TAG_INVALID_BIT	46
#define TSB_TAG_INVALID_HIGH	(1 << (TSB_TAG_INVALID_BIT - 32))

#define TSB_MEMBAR	membar	#StoreStore

/* Some cpus support physical address quad loads.  We want to use
 * those if possible so we don't need to hard-lock the TSB mapping
 * into the TLB.  We encode some instruction patching in order to
@@ -128,13 +126,11 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
	cmp	REG1, REG2;		\
	bne,pn	%icc, 99b;		\
	 nop;				\
	TSB_MEMBAR

#define TSB_WRITE(TSB, TTE, TAG) \
	add	TSB, 0x8, TSB;   \
	TSB_STORE(TSB, TTE);     \
	sub	TSB, 0x8, TSB;   \
	TSB_MEMBAR;              \
	TSB_STORE(TSB, TAG);

#define KTSB_LOAD_QUAD(TSB, REG) \
@@ -153,13 +149,11 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
	cmp	REG1, REG2;		\
	bne,pn	%icc, 99b;		\
	 nop;				\
	TSB_MEMBAR

#define KTSB_WRITE(TSB, TTE, TAG) \
	add	TSB, 0x8, TSB;   \
	stxa	TTE, [TSB] ASI_N;     \
	sub	TSB, 0x8, TSB;   \
	TSB_MEMBAR;              \
	stxa	TAG, [TSB] ASI_N;

	/* Do a kernel page table walk.  Leaves physical PTE pointer in
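
Note (illustrative, not part of the commit): the tsb.h hunk drops TSB_MEMBAR, a #StoreStore barrier that sat between storing a TSB entry's TTE and the TAG store that makes the entry visible. TSO never reorders two stores with each other, so that data-before-valid ordering holds without the membar. In C terms (struct layout and names are illustrative, mirroring the TAG-at-0x0 / TTE-at-0x8 offsets the macros use; the real code is an asm macro, so compiler reordering is not an issue there):

struct tsb_entry_sketch {
	unsigned long tag;	/* offset 0x0, written last	*/
	unsigned long tte;	/* offset 0x8, written first	*/
};

static void tsb_write_sketch(struct tsb_entry_sketch *ent,
			     unsigned long tte, unsigned long tag)
{
	ent->tte = tte;		/* fill in the translation	*/
	ent->tag = tag;		/* TSO keeps this store after the one above */
}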