
Commit 17099b11 authored by Ralf Baechle

[MIPS] Make support for weakly ordered LL/SC a config option.



None of the weakly ordered processors supported in-tree need this, but it seems
like this could change ...

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent ed203dad
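
As a minimal sketch (assuming CONFIG_SMP=y; this is a simplification of the __WEAK_LLSC_MB string added to barrier.h below, not the literal patch code), the new barrier amounts to:

	/* Effective behaviour of the new macro when CONFIG_SMP=y. */
	#ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
	#define smp_llsc_mb()	__asm__ __volatile__("sync" : : : "memory")
	#else
	/* No sync emitted; the "memory" clobber still acts as a compiler barrier. */
	#define smp_llsc_mb()	__asm__ __volatile__("" : : : "memory")
	#endif

The atomic.h, bitops.h and futex.h hunks below then switch their LL/SC sequences from smp_mb()/__WEAK_ORDERING_MB to smp_llsc_mb()/__WEAK_LLSC_MB, so the extra sync is only paid on CPUs configured as reordering beyond LL and SC.
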
arch/mips/Kconfig  +11 −0
@@ -1190,8 +1190,19 @@ config SYS_HAS_CPU_RM9000
 config SYS_HAS_CPU_SB1
 	bool
 
+#
+# CPU may reorder R->R, R->W, W->R, W->W
+# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
+#
 config WEAK_ORDERING
 	bool
+
+#
+# CPU may reorder reads and writes beyond LL/SC
+# CPU may reorder R->LL, R->LL, W->LL, W->LL, R->SC, R->SC, W->SC, W->SC
+#
+config WEAK_REORDERING_BEYOND_LLSC
+	bool
 endmenu
 
 #

include/asm-mips/atomic.h  +17 −16
@@ -138,7 +138,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -181,7 +181,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -190,7 +190,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -233,7 +233,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -250,7 +250,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -302,7 +302,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -519,7 +519,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -562,7 +562,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -571,7 +571,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -614,7 +614,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -631,7 +631,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -683,7 +683,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -791,10 +791,11 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  * atomic*_return operations are serializing but not the non-*_return
  * versions.
  */
-#define smp_mb__before_atomic_dec()	smp_mb()
-#define smp_mb__after_atomic_dec()	smp_mb()
-#define smp_mb__before_atomic_inc()	smp_mb()
-#define smp_mb__after_atomic_inc()	smp_mb()
+#define smp_mb__before_atomic_dec()	smp_llsc_mb()
+#define smp_mb__after_atomic_dec()	smp_llsc_mb()
+#define smp_mb__before_atomic_inc()	smp_llsc_mb()
+#define smp_mb__after_atomic_inc()	smp_llsc_mb()
 
 #include <asm-generic/atomic.h>
+
 #endif /* _ASM_ATOMIC_H */

include/asm-mips/barrier.h  +9 −0
@@ -121,6 +121,11 @@
 #else
 #define __WEAK_ORDERING_MB	"		\n"
 #endif
+#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
+#define __WEAK_LLSC_MB		"       sync	\n"
+#else
+#define __WEAK_LLSC_MB		"		\n"
+#endif
 
 #define smp_mb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
 #define smp_rmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
@@ -129,4 +134,8 @@
 #define set_mb(var, value) \
 	do { var = value; smp_mb(); } while (0)
 
+#define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#define smp_llsc_rmb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#define smp_llsc_wmb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+
 #endif /* __ASM_BARRIER_H */

include/asm-mips/bitops.h  +5 −5
@@ -38,8 +38,8 @@
 /*
  * clear_bit() doesn't provide any barrier for the compiler.
  */
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	smp_mb()
+#define smp_mb__before_clear_bit()	smp_llsc_mb()
+#define smp_mb__after_clear_bit()	smp_llsc_mb()
 
 /*
  * set_bit - Atomically set a bit in memory
@@ -289,7 +289,7 @@ static inline int test_and_set_bit(unsigned long nr,
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return res != 0;
 }
@@ -377,7 +377,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return res != 0;
 }
@@ -445,7 +445,7 @@ static inline int test_and_change_bit(unsigned long nr,
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return res != 0;
 }

include/asm-mips/futex.h  +4 −4
@@ -29,7 +29,7 @@
 		"	.set	mips3				\n"	\
 		"2:	sc	$1, %2				\n"	\
 		"	beqzl	$1, 1b				\n"	\
-		__WEAK_ORDERING_MB					\
+		__WEAK_LLSC_MB						\
 		"3:						\n"	\
 		"	.set	pop				\n"	\
 		"	.set	mips0				\n"	\
@@ -55,7 +55,7 @@
 		"	.set	mips3				\n"	\
 		"2:	sc	$1, %2				\n"	\
 		"	beqz	$1, 1b				\n"	\
-		__WEAK_ORDERING_MB					\
+		__WEAK_LLSC_MB						\
 		"3:						\n"	\
 		"	.set	pop				\n"	\
 		"	.set	mips0				\n"	\
@@ -152,7 +152,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 		"	.set	mips3					\n"
 		"2:	sc	$1, %1					\n"
 		"	beqzl	$1, 1b					\n"
-		__WEAK_ORDERING_MB
+		__WEAK_LLSC_MB
 		"3:							\n"
 		"	.set	pop					\n"
 		"	.section .fixup,\"ax\"				\n"
@@ -179,7 +179,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 		"	.set	mips3					\n"
 		"2:	sc	$1, %1					\n"
 		"	beqz	$1, 1b					\n"
-		__WEAK_ORDERING_MB
+		__WEAK_LLSC_MB
 		"3:							\n"
 		"	.set	pop					\n"
 		"	.section .fixup,\"ax\"				\n"