
Commit 6b1e7629 authored by Paul Burton, committed by Ralf Baechle

MIPS: cmpxchg: Unify R10000_LLSC_WAR & non-R10000_LLSC_WAR cases



Prior to this patch, the xchg & cmpxchg functions have duplicated code
which is, for all intents & purposes, identical apart from the use of a
branch-likely instruction in the R10000_LLSC_WAR case & a regular branch
instruction in the non-R10000_LLSC_WAR case.
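
For reference, the two duplicated variants differ only in how a failed
sc is retried; schematically (a simplified sketch, not the verbatim
kernel asm):

	/* R10000_LLSC_WAR case: branch-likely retries the ll/sc sequence */
	"	sc	%2, %1					\n"
	"	beqzl	%2, 1b					\n"

	/* non-R10000_LLSC_WAR case: an ordinary branch (or a C loop) retries */
	"	sc	%2, %1					\n"
	"	beqz	%2, 1b					\n"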

This patch removes the duplication, declaring a __scbeqz macro to select
the branch instruction suitable for use when checking the result of an
sc instruction & making use of it to unify the 2 cases.
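
Concretely, the macro (visible in the diff below) picks the mnemonic at
preprocessing time & is spliced into the asm via C string concatenation:

	#if R10000_LLSC_WAR
	# define __scbeqz "beqzl"	/* branch-likely: R10000 errata workaround */
	#else
	# define __scbeqz "beqz"	/* ordinary branch everywhere else */
	#endif

	/* e.g. "\t" __scbeqz "	%2, 1b" expands to "\tbeqz	%2, 1b" */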

In __xchg_u{32,64}() this means writing the branch in asm, where it was
previously done in C as a do...while loop for the non-R10000_LLSC_WAR
case. Since this is a single instruction, and it adds consistency with
the R10000_LLSC_WAR case & the cmpxchg() code, this seems worthwhile.
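
Schematically, the conversion looks like this; a condensed sketch with
GCC_OFF_SMALL_ASM() simplified to plain "m" constraints, not the
verbatim kernel code:

	/* before: retry loop driven from C, non-R10000_LLSC_WAR case */
	do {
		__asm__ __volatile__(
		"	ll	%0, %3		\n"	/* load-linked */
		"	move	%2, %z4		\n"
		"	sc	%2, %1		\n"	/* store-conditional */
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "m" (*m), "Jr" (val)
		: "memory");
	} while (unlikely(!dummy));

	/* after: a single branch inside the asm retries a failed sc */
	__asm__ __volatile__(
	"1:	ll	%0, %3		\n"
	"	move	%2, %z4		\n"
	"	sc	%2, %1		\n"
	"\t" __scbeqz "	%2, 1b		\n"	/* retry if sc failed */
	: "=&r" (retval), "=m" (*m), "=&r" (dummy)
	: "m" (*m), "Jr" (val)
	: "memory");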

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/16348/


Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 3f88ec63
arch/mips/include/asm/cmpxchg.h (+22 −58)
@@ -13,44 +13,38 @@
 #include <asm/compiler.h>
 #include <asm/war.h>
 
+/*
+ * Using a branch-likely instruction to check the result of an sc instruction
+ * works around a bug present in R10000 CPUs prior to revision 3.0 that could
+ * cause ll-sc sequences to execute non-atomically.
+ */
+#if R10000_LLSC_WAR
+# define __scbeqz "beqzl"
+#else
+# define __scbeqz "beqz"
+#endif
+
 static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 {
 	__u32 retval;
 
 	smp_mb__before_llsc();
 
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
+	if (kernel_uses_llsc) {
 		unsigned long dummy;
 
 		__asm__ __volatile__(
-		"	.set	arch=r4000				\n"
+		"	.set	" MIPS_ISA_ARCH_LEVEL "			\n"
 		"1:	ll	%0, %3			# xchg_u32	\n"
 		"	.set	mips0					\n"
 		"	move	%2, %z4					\n"
-		"	.set	arch=r4000				\n"
+		"	.set	" MIPS_ISA_ARCH_LEVEL "			\n"
 		"	sc	%2, %1					\n"
-		"	beqzl	%2, 1b					\n"
+		"\t" __scbeqz "	%2, 1b					\n"
 		"	.set	mips0					\n"
 		: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
 		: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
 		: "memory");
-	} else if (kernel_uses_llsc) {
-		unsigned long dummy;
-
-		do {
-			__asm__ __volatile__(
-			"	.set	" MIPS_ISA_ARCH_LEVEL "		\n"
-			"	ll	%0, %3		# xchg_u32	\n"
-			"	.set	mips0				\n"
-			"	move	%2, %z4				\n"
-			"	.set	" MIPS_ISA_ARCH_LEVEL "		\n"
-			"	sc	%2, %1				\n"
-			"	.set	mips0				\n"
-			: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
-			  "=&r" (dummy)
-			: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
-			: "memory");
-		} while (unlikely(!dummy));
 	} else {
 		unsigned long flags;
 
@@ -72,34 +66,19 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 
 	smp_mb__before_llsc();
 
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
+	if (kernel_uses_llsc) {
 		unsigned long dummy;
 
 		__asm__ __volatile__(
-		"	.set	arch=r4000				\n"
+		"	.set	" MIPS_ISA_ARCH_LEVEL "			\n"
 		"1:	lld	%0, %3			# xchg_u64	\n"
 		"	move	%2, %z4					\n"
 		"	scd	%2, %1					\n"
-		"	beqzl	%2, 1b					\n"
+		"\t" __scbeqz "	%2, 1b					\n"
 		"	.set	mips0					\n"
 		: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
 		: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
 		: "memory");
-	} else if (kernel_uses_llsc) {
-		unsigned long dummy;
-
-		do {
-			__asm__ __volatile__(
-			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
-			"	lld	%0, %3		# xchg_u64	\n"
-			"	move	%2, %z4				\n"
-			"	scd	%2, %1				\n"
-			"	.set	mips0				\n"
-			: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
-			  "=&r" (dummy)
-			: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
-			: "memory");
-		} while (unlikely(!dummy));
 	} else {
 		unsigned long flags;
 
@@ -142,24 +121,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 ({									\
 	__typeof(*(m)) __ret;						\
 									\
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {			\
-		__asm__ __volatile__(					\
-		"	.set	push				\n"	\
-		"	.set	noat				\n"	\
-		"	.set	arch=r4000			\n"	\
-		"1:	" ld "	%0, %2		# __cmpxchg_asm \n"	\
-		"	bne	%0, %z3, 2f			\n"	\
-		"	.set	mips0				\n"	\
-		"	move	$1, %z4				\n"	\
-		"	.set	arch=r4000			\n"	\
-		"	" st "	$1, %1				\n"	\
-		"	beqzl	$1, 1b				\n"	\
-		"2:						\n"	\
-		"	.set	pop				\n"	\
-		: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)		\
-		: GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new)	\
-		: "memory");						\
-	} else if (kernel_uses_llsc) {					\
+	if (kernel_uses_llsc) {						\
 		__asm__ __volatile__(					\
 		"	.set	push				\n"	\
 		"	.set	noat				\n"	\
@@ -170,7 +132,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 		"	move	$1, %z4				\n"	\
 		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"	\
 		"	" st "	$1, %1				\n"	\
-		"	beqz	$1, 1b				\n"	\
+		"\t" __scbeqz "	$1, 1b				\n"	\
 		"	.set	pop				\n"	\
 		"2:						\n"	\
 		: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)		\
@@ -245,4 +207,6 @@ extern void __cmpxchg_called_with_bad_pointer(void);
 #define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
 #endif
 
+#undef __scbeqz
+
 #endif /* __ASM_CMPXCHG_H */