
Commit 6059a7b6 authored by Will Deacon

arm64: atomics: implement atomic{,64}_cmpxchg using cmpxchg



We don't need duplicate cmpxchg implementations, so use cmpxchg to
implement atomic{,64}_cmpxchg, like we do for xchg already.

Reviewed-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 0ea366f5
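
For orientation (a sketch, not part of the commit): after this change both atomic_cmpxchg() and atomic64_cmpxchg() become thin wrappers around the type-generic cmpxchg() macro, just as atomic{,64}_xchg already wrap xchg(). Callers are unaffected; a typical compare-and-swap retry loop still reads as follows (atomic_add_unless_sketch is an illustrative name, not a kernel API):

	/*
	 * Illustrative only: add 'a' to *v unless the counter currently
	 * holds 'u', built on the atomic_cmpxchg() wrapper added below.
	 */
	static inline int atomic_add_unless_sketch(atomic_t *v, int a, int u)
	{
		int c = atomic_read(v);

		while (c != u) {
			int seen = atomic_cmpxchg(v, c, c + a);

			if (seen == c)		/* CAS succeeded */
				return 1;
			c = seen;		/* raced: retry with observed value */
		}
		return 0;
	}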
arch/arm64/include/asm/atomic.h +2 −0
@@ -56,6 +56,7 @@
 #define atomic_read(v)			READ_ONCE((v)->counter)
 #define atomic_set(v, i)		(((v)->counter) = (i))
 #define atomic_xchg(v, new)		xchg(&((v)->counter), (new))
+#define atomic_cmpxchg(v, old, new)	cmpxchg(&((v)->counter), (old), (new))
 
 #define atomic_inc(v)			atomic_add(1, (v))
 #define atomic_dec(v)			atomic_sub(1, (v))
@@ -75,6 +76,7 @@
 #define atomic64_read			atomic_read
 #define atomic64_set			atomic_set
 #define atomic64_xchg			atomic_xchg
+#define atomic64_cmpxchg		atomic_cmpxchg
 
 #define atomic64_inc(v)			atomic64_add(1, (v))
 #define atomic64_dec(v)			atomic64_sub(1, (v))
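
What lets the single definition serve both widths above is that the kernel's cmpxchg() dispatches on sizeof(*ptr). A rough standalone analogue, assuming only the GCC/Clang __atomic builtins (this is not the kernel's implementation):

	#include <stdint.h>

	/* Returns the value observed at *ptr, like the kernel's cmpxchg(). */
	#define my_cmpxchg(ptr, oldv, newv)					\
	({									\
		__typeof__(*(ptr)) __old = (oldv);				\
		__atomic_compare_exchange_n((ptr), &__old, (newv), 0,		\
					    __ATOMIC_SEQ_CST,			\
					    __ATOMIC_SEQ_CST);			\
		__old;								\
	})

Because the builtin is polymorphic over the pointee size, my_cmpxchg() works unchanged on an int32_t or an int64_t counter, which is what lets atomic64_cmpxchg simply alias atomic_cmpxchg.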
arch/arm64/include/asm/atomic_ll_sc.h +0 −46
@@ -93,29 +93,6 @@ ATOMIC_OP(xor, eor)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-__LL_SC_INLINE int
-__LL_SC_PREFIX(atomic_cmpxchg(atomic_t *ptr, int old, int new))
-{
-	unsigned long tmp;
-	int oldval;
-
-	asm volatile("// atomic_cmpxchg\n"
-"	prfm	pstl1strm, %2\n"
-"1:	ldxr	%w1, %2\n"
-"	eor	%w0, %w1, %w3\n"
-"	cbnz	%w0, 2f\n"
-"	stlxr	%w0, %w4, %2\n"
-"	cbnz	%w0, 1b\n"
-"	dmb	ish\n"
-"2:"
-	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
-	: "Lr" (old), "r" (new)
-	: "memory");
-
-	return oldval;
-}
-__LL_SC_EXPORT(atomic_cmpxchg);
-
 #define ATOMIC64_OP(op, asm_op)						\
 __LL_SC_INLINE void							\
 __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))			\
@@ -172,29 +149,6 @@ ATOMIC64_OP(xor, eor)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-__LL_SC_INLINE long
-__LL_SC_PREFIX(atomic64_cmpxchg(atomic64_t *ptr, long old, long new))
-{
-	long oldval;
-	unsigned long res;
-
-	asm volatile("// atomic64_cmpxchg\n"
-"	prfm	pstl1strm, %2\n"
-"1:	ldxr	%1, %2\n"
-"	eor	%0, %1, %3\n"
-"	cbnz	%w0, 2f\n"
-"	stlxr	%w0, %4, %2\n"
-"	cbnz	%w0, 1b\n"
-"	dmb	ish\n"
-"2:"
-	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
-	: "Lr" (old), "r" (new)
-	: "memory");
-
-	return oldval;
-}
-__LL_SC_EXPORT(atomic64_cmpxchg);
-
 __LL_SC_INLINE long
 __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
 {
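
The routines deleted above are the hand-rolled LL/SC (exclusive-monitor) versions: ldxr performs a load-exclusive, eor/cbnz compares the observed value against old, stlxr attempts a store-release that fails if another CPU claimed the line in the meantime, and dmb ish supplies the full barrier after a successful swap. A minimal standalone rendering of the 32-bit sequence (a sketch only: AArch64-specific, the prfm prefetch hint dropped, plain "r" constraints instead of the kernel's "Lr"):

	static inline int llsc_cmpxchg32(int *p, int old, int new)
	{
		unsigned long tmp;
		int oldval;

		asm volatile(
	"1:	ldxr	%w1, %2\n"		/* load-exclusive: oldval = *p    */
	"	eor	%w0, %w1, %w3\n"	/* tmp = oldval ^ old             */
	"	cbnz	%w0, 2f\n"		/* mismatch: exit without storing */
	"	stlxr	%w0, %w4, %2\n"		/* store-release; tmp = 0 if won  */
	"	cbnz	%w0, 1b\n"		/* exclusivity lost: retry        */
	"	dmb	ish\n"			/* full barrier on success        */
	"2:"
		: "=&r" (tmp), "=&r" (oldval), "+Q" (*p)
		: "r" (old), "r" (new)
		: "memory");

		return oldval;
	}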
arch/arm64/include/asm/atomic_lse.h +0 −43
@@ -149,28 +149,6 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	return w0;
 }
 
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
-{
-	register unsigned long x0 asm ("x0") = (unsigned long)ptr;
-	register int w1 asm ("w1") = old;
-	register int w2 asm ("w2") = new;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"	nop\n"
-	__LL_SC_ATOMIC(cmpxchg)
-	"	nop",
-	/* LSE atomics */
-	"	mov	w30, %w[old]\n"
-	"	casal	w30, %w[new], %[v]\n"
-	"	mov	%w[ret], w30")
-	: [ret] "+r" (x0), [v] "+Q" (ptr->counter)
-	: [old] "r" (w1), [new] "r" (w2)
-	: "x30", "memory");
-
-	return x0;
-}
-
 #undef __LL_SC_ATOMIC
 
 #define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(atomic64_##op)
@@ -296,27 +274,6 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
 
 	return x0;
 }
-static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
-{
-	register unsigned long x0 asm ("x0") = (unsigned long)ptr;
-	register long x1 asm ("x1") = old;
-	register long x2 asm ("x2") = new;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"	nop\n"
-	__LL_SC_ATOMIC64(cmpxchg)
-	"	nop",
-	/* LSE atomics */
-	"	mov	x30, %[old]\n"
-	"	casal	x30, %[new], %[v]\n"
-	"	mov	%[ret], x30")
-	: [ret] "+r" (x0), [v] "+Q" (ptr->counter)
-	: [old] "r" (x1), [new] "r" (x2)
-	: "x30", "memory");
-
-	return x0;
-}
 
 static inline long atomic64_dec_if_positive(atomic64_t *v)
 {
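
In the LSE flavour removed above, ARM64_LSE_ATOMIC_INSN patches at boot between a call to the out-of-line LL/SC routine and the single ARMv8.1 casal instruction, which performs the whole compare-and-swap with acquire and release semantics; w30/x30 serves as the compare register because the out-of-line LL/SC call already clobbers the link register. A standalone rendering of just the LSE path (a sketch only: needs a CPU and toolchain supporting -march=armv8.1-a, and lets the compiler pick registers instead of the fixed x0-x2 convention the kernel needs for the out-of-line call):

	static inline int lse_cmpxchg32(int *p, int old, int new)
	{
		int oldval = old;	/* casal compares against, then overwrites, this */

		asm volatile(
		"	casal	%w0, %w2, %1"	/* if (*p == w0) *p = w2; w0 = *p */
		: "+r" (oldval), "+Q" (*p)
		: "r" (new)
		: "memory");

		return oldval;
	}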