
Commit 4edac529 authored by Peter Zijlstra, committed by Ingo Molnar

locking/atomic, arch/mips: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()



Implement the FETCH-OP atomic primitives. These are very similar to the
existing OP-RETURN primitives we already have, except they return the
value of the atomic variable _before_ modification.

This is especially useful for irreversible operations -- such as
bitops (because it becomes impossible to reconstruct the state prior
to modification).
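
To make the distinction concrete, here is a minimal userspace sketch
(using the portable GCC/Clang __atomic builtins rather than the kernel
API, so it compiles standalone) contrasting the two conventions:

	#include <stdio.h>

	int main(void)
	{
		int v = 6;

		/* FETCH-OP: returns the value _before_ the addition (6). */
		int old = __atomic_fetch_add(&v, 4, __ATOMIC_SEQ_CST);

		/* OP-RETURN: returns the value _after_ the addition (14). */
		int new = __atomic_add_fetch(&v, 4, __ATOMIC_SEQ_CST);

		printf("old=%d new=%d final=%d\n", old, new, v);
		return 0;
	}

For a reversible op such as add the old value could be recomputed as
new - i, but after an OR the overwritten bits are gone; only the
fetched old value can say whether a given bit was already set.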

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: linux-mips@linux-mips.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e898eb27
arch/mips/include/asm/atomic.h  +129 −9
@@ -130,18 +130,78 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v)	      \
 	return result;							      \
 }
 
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)				      \
+static __inline__ int atomic_fetch_##op(int i, atomic_t * v)		      \
+{									      \
+	int result;							      \
+									      \
+	smp_mb__before_llsc();						      \
+									      \
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {			      \
+		int temp;						      \
+									      \
+		__asm__ __volatile__(					      \
+		"	.set	arch=r4000				\n"   \
+		"1:	ll	%1, %2		# atomic_fetch_" #op "	\n"   \
+		"	" #asm_op " %0, %1, %3				\n"   \
+		"	sc	%0, %2					\n"   \
+		"	beqzl	%0, 1b					\n"   \
+		"	move	%0, %1					\n"   \
+		"	.set	mips0					\n"   \
+		: "=&r" (result), "=&r" (temp),				      \
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
+		: "Ir" (i));						      \
+	} else if (kernel_uses_llsc) {					      \
+		int temp;						      \
+									      \
+		do {							      \
+			__asm__ __volatile__(				      \
+			"	.set	"MIPS_ISA_LEVEL"		\n"   \
+			"	ll	%1, %2	# atomic_fetch_" #op "	\n"   \
+			"	" #asm_op " %0, %1, %3			\n"   \
+			"	sc	%0, %2				\n"   \
+			"	.set	mips0				\n"   \
+			: "=&r" (result), "=&r" (temp),			      \
+			  "+" GCC_OFF_SMALL_ASM() (v->counter)		      \
+			: "Ir" (i));					      \
+		} while (unlikely(!result));				      \
+									      \
+		result = temp;						      \
+	} else {							      \
+		unsigned long flags;					      \
+									      \
+		raw_local_irq_save(flags);				      \
+		result = v->counter;					      \
+		v->counter c_op i;					      \
+		raw_local_irq_restore(flags);				      \
+	}								      \
+									      \
+	smp_llsc_mb();							      \
+									      \
+	return result;							      \
+}
+
 #define ATOMIC_OPS(op, c_op, asm_op)					      \
 	ATOMIC_OP(op, c_op, asm_op)					      \
-	ATOMIC_OP_RETURN(op, c_op, asm_op)
+	ATOMIC_OP_RETURN(op, c_op, asm_op)				      \
+	ATOMIC_FETCH_OP(op, c_op, asm_op)
 
 ATOMIC_OPS(add, +=, addu)
 ATOMIC_OPS(sub, -=, subu)
 
-ATOMIC_OP(and, &=, and)
-ATOMIC_OP(or, |=, or)
-ATOMIC_OP(xor, ^=, xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op)					      \
+	ATOMIC_OP(op, c_op, asm_op)					      \
+	ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+#define atomic_fetch_or atomic_fetch_or
+
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(or, |=, or)
+ATOMIC_OPS(xor, ^=, xor)
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
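
The 64-bit hunk below follows the same three-branch pattern as above:
the R10000 branch-likely workaround, a plain LL/SC retry loop, and an
interrupt-disable fallback for CPUs without ll/sc. As a reading aid,
hand-expanding the fallback branch of ATOMIC_FETCH_OP(add, +=, addu)
gives roughly the following (a sketch only; barriers elided):

	static __inline__ int atomic_fetch_add(int i, atomic_t *v)
	{
		int result;
		unsigned long flags;

		raw_local_irq_save(flags);	/* atomic w.r.t. local IRQs */
		result = v->counter;		/* capture the old value    */
		v->counter += i;		/* ... then apply the op    */
		raw_local_irq_restore(flags);

		return result;			/* value before the add     */
	}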

@@ -414,17 +474,77 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
 	return result;							      \
 }
 
+#define ATOMIC64_FETCH_OP(op, c_op, asm_op)				      \
+static __inline__ long atomic64_fetch_##op(long i, atomic64_t * v)	      \
+{									      \
+	long result;							      \
+									      \
+	smp_mb__before_llsc();						      \
+									      \
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {			      \
+		long temp;						      \
+									      \
+		__asm__ __volatile__(					      \
+		"	.set	arch=r4000				\n"   \
+		"1:	lld	%1, %2		# atomic64_fetch_" #op "\n"   \
+		"	" #asm_op " %0, %1, %3				\n"   \
+		"	scd	%0, %2					\n"   \
+		"	beqzl	%0, 1b					\n"   \
+		"	move	%0, %1					\n"   \
+		"	.set	mips0					\n"   \
+		: "=&r" (result), "=&r" (temp),				      \
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
+		: "Ir" (i));						      \
+	} else if (kernel_uses_llsc) {					      \
+		long temp;						      \
+									      \
+		do {							      \
+			__asm__ __volatile__(				      \
+			"	.set	"MIPS_ISA_LEVEL"		\n"   \
+			"	lld	%1, %2	# atomic64_fetch_" #op "\n"   \
+			"	" #asm_op " %0, %1, %3			\n"   \
+			"	scd	%0, %2				\n"   \
+			"	.set	mips0				\n"   \
+			: "=&r" (result), "=&r" (temp),			      \
+			  "=" GCC_OFF_SMALL_ASM() (v->counter)		      \
+			: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)	      \
+			: "memory");					      \
+		} while (unlikely(!result));				      \
+									      \
+		result = temp;						      \
+	} else {							      \
+		unsigned long flags;					      \
+									      \
+		raw_local_irq_save(flags);				      \
+		result = v->counter;					      \
+		v->counter c_op i;					      \
+		raw_local_irq_restore(flags);				      \
+	}								      \
+									      \
+	smp_llsc_mb();							      \
+									      \
+	return result;							      \
+}
+
 #define ATOMIC64_OPS(op, c_op, asm_op)					      \
 	ATOMIC64_OP(op, c_op, asm_op)					      \
-	ATOMIC64_OP_RETURN(op, c_op, asm_op)
+	ATOMIC64_OP_RETURN(op, c_op, asm_op)				      \
+	ATOMIC64_FETCH_OP(op, c_op, asm_op)
 
 ATOMIC64_OPS(add, +=, daddu)
 ATOMIC64_OPS(sub, -=, dsubu)
-ATOMIC64_OP(and, &=, and)
-ATOMIC64_OP(or, |=, or)
-ATOMIC64_OP(xor, ^=, xor)
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, c_op, asm_op)					      \
+	ATOMIC64_OP(op, c_op, asm_op)					      \
+	ATOMIC64_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC64_OPS(and, &=, and)
+ATOMIC64_OPS(or, |=, or)
+ATOMIC64_OPS(xor, ^=, xor)
 
 #undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
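
With these primitives in place, callers can build irreversible
read-modify-write sequences without losing the prior state. A
hypothetical helper (not part of this patch) using the freshly
generated atomic_fetch_or():

	/* Atomically claim bit 'nr' of a shared mask; returns true iff
	 * the caller is the one who set it.  OR is irreversible, so only
	 * the fetched old value carries this information. */
	static inline bool try_claim_bit(atomic_t *mask, int nr)
	{
		int old = atomic_fetch_or(1 << nr, mask);

		return !(old & (1 << nr));
	}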