
Commit f252ffd5 authored by David Daney, committed by Ralf Baechle

MIPS: New macro smp_mb__before_llsc.



Replace some instances of smp_llsc_mb() with a new macro
smp_mb__before_llsc().  It is used before ll/sc sequences that are
documented as needing write barrier semantics.

The default implementation of smp_mb__before_llsc() is just smp_llsc_mb(),
so there are no changes in semantics.

Also simplify definition of smp_mb(), smp_rmb(), and smp_wmb() to be just
barrier() in the non-SMP case.

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
To: linux-mips@linux-mips.org
Patchwork: http://patchwork.linux-mips.org/patch/851/


Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent ec5380c7
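
For context (an editorial sketch, not part of the commit): the pattern being standardized is "full barrier before the ll, ordering barrier after the sc". The sketch below shows that placement in a value-returning atomic, loosely modeled on atomic_add_return() from arch/mips/include/asm/atomic.h; the name atomic_add_return_sketch, the atomic_sketch_t type, and the omission of the R10000_LLSC_WAR path are illustrative simplifications, not code from this patch.

/* Sketch only: where the barriers sit around an ll/sc retry loop. */
#include <asm/barrier.h>	/* smp_mb__before_llsc(), smp_llsc_mb() */

typedef struct { volatile int counter; } atomic_sketch_t;	/* stand-in for atomic_t */

static inline int atomic_add_return_sketch(int i, atomic_sketch_t *v)
{
	int result, temp;

	smp_mb__before_llsc();	/* was smp_llsc_mb() before this patch */

	__asm__ __volatile__(
	"	.set	mips3					\n"
	"1:	ll	%1, %2		# load-linked counter	\n"
	"	addu	%0, %1, %3	# result = old + i	\n"
	"	sc	%0, %2		# store-conditional	\n"
	"	beqz	%0, 1b		# retry if sc failed	\n"
	"	addu	%0, %1, %3	# recompute result	\n"
	"	.set	mips0					\n"
	: "=&r" (result), "=&r" (temp), "+m" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_llsc_mb();		/* order the ll/sc against later accesses */

	return result;
}

As the message above says, the default smp_mb__before_llsc() is still smp_llsc_mb(), so generated code does not change; the new name gives platforms a hook to later provide a weaker write barrier before ll/sc sequences that are documented as needing only write barrier semantics.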
arch/mips/include/asm/atomic.h  +8 −8
@@ -137,7 +137,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
{
	int result;

-	smp_llsc_mb();
+	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		int temp;
@@ -189,7 +189,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
	int result;

-	smp_llsc_mb();
+	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		int temp;
@@ -249,7 +249,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
{
	int result;

-	smp_llsc_mb();
+	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		int temp;
@@ -516,7 +516,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	long result;

-	smp_llsc_mb();
+	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		long temp;
@@ -568,7 +568,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
	long result;

-	smp_llsc_mb();
+	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		long temp;
@@ -628,7 +628,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
{
	long result;

-	smp_llsc_mb();
+	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		long temp;
@@ -788,9 +788,9 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 * atomic*_return operations are serializing but not the non-*_return
 * versions.
 */
-#define smp_mb__before_atomic_dec()	smp_llsc_mb()
+#define smp_mb__before_atomic_dec()	smp_mb__before_llsc()
#define smp_mb__after_atomic_dec()	smp_llsc_mb()
-#define smp_mb__before_atomic_inc()	smp_llsc_mb()
+#define smp_mb__before_atomic_inc()	smp_mb__before_llsc()
#define smp_mb__after_atomic_inc()	smp_llsc_mb()

#include <asm-generic/atomic-long.h>
arch/mips/include/asm/barrier.h  +9 −6
@@ -131,23 +131,26 @@
#endif /* !CONFIG_CPU_HAS_WB */

#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
-#define __WEAK_ORDERING_MB	"       sync	\n"
+#define smp_mb()	__asm__ __volatile__("sync" : : :"memory")
+#define smp_rmb()	__asm__ __volatile__("sync" : : :"memory")
+#define smp_wmb()	__asm__ __volatile__("sync" : : :"memory")
#else
-#define __WEAK_ORDERING_MB	"		\n"
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
#endif

#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
#define __WEAK_LLSC_MB		"       sync	\n"
#else
#define __WEAK_LLSC_MB		"		\n"
#endif

-#define smp_mb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
-#define smp_rmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
-#define smp_wmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")

#define set_mb(var, value) \
	do { var = value; smp_mb(); } while (0)

#define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")

+#define smp_mb__before_llsc() smp_llsc_mb()

#endif /* __ASM_BARRIER_H */
arch/mips/include/asm/bitops.h  +4 −4
@@ -42,7 +42,7 @@
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
-#define smp_mb__before_clear_bit()	smp_llsc_mb()
+#define smp_mb__before_clear_bit()	smp_mb__before_llsc()
#define smp_mb__after_clear_bit()	smp_llsc_mb()

/*
@@ -258,7 +258,7 @@ static inline int test_and_set_bit(unsigned long nr,
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

-	smp_llsc_mb();
+	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -395,7 +395,7 @@ static inline int test_and_clear_bit(unsigned long nr,
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

-	smp_llsc_mb();
+	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -485,7 +485,7 @@ static inline int test_and_change_bit(unsigned long nr,
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

-	smp_llsc_mb();
+	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
arch/mips/include/asm/cmpxchg.h  +5 −5
@@ -72,14 +72,14 @@
 */
extern void __cmpxchg_called_with_bad_pointer(void);

-#define __cmpxchg(ptr, old, new, barrier)				\
+#define __cmpxchg(ptr, old, new, pre_barrier, post_barrier)		\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__typeof__(*(ptr)) __res = 0;					\
									\
-	barrier;							\
+	pre_barrier;							\
									\
	switch (sizeof(*(__ptr))) {					\
	case 4:								\
@@ -96,13 +96,13 @@ extern void __cmpxchg_called_with_bad_pointer(void);
		break;							\
	}								\
									\
-	barrier;							\
+	post_barrier;							\
									\
	__res;								\
})

-#define cmpxchg(ptr, old, new)		__cmpxchg(ptr, old, new, smp_llsc_mb())
-#define cmpxchg_local(ptr, old, new)	__cmpxchg(ptr, old, new, )
+#define cmpxchg(ptr, old, new)		__cmpxchg(ptr, old, new, smp_mb__before_llsc(), smp_llsc_mb())
+#define cmpxchg_local(ptr, old, new)	__cmpxchg(ptr, old, new, , )

#define cmpxchg64(ptr, o, n)						\
  ({									\
arch/mips/include/asm/spinlock.h  +2 −2
@@ -138,7 +138,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	int tmp;

-	smp_llsc_mb();
+	smp_mb__before_llsc();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
@@ -305,7 +305,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp;

-	smp_llsc_mb();
+	smp_mb__before_llsc();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(