Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1a6eafac authored by Peter Zijlstra, committed by Ingo Molnar
Browse files

locking/atomic, arch/avr32: Implement atomic_fetch_{add,sub,and,or,xor}()



Implement FETCH-OP atomic primitives, these are very similar to the
existing OP-RETURN primitives we already have, except they return the
value of the atomic variable _before_ modification.

This is especially useful for irreversible operations -- such as
bitops (because it becomes impossible to reconstruct the state prior
to modification).

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Hans-Christian Noren Egtvedt <egtvedt@samfundet.no>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Haavard Skinnemoen <hskinnemoen@gmail.com>
Cc: Hans-Christian Egtvedt <egtvedt@samfundet.no>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 2efe95fe
Loading
Loading
Loading
Loading
+51 −5
Original line number Diff line number Diff line
@@ -41,21 +41,51 @@ static inline int __atomic_##op##_return(int i, atomic_t *v) \
	return result;							\
}

/*
 * ATOMIC_FETCH_OP(op, asm_op, asm_con) - generate __atomic_fetch_<op>().
 *
 * Expands to a function that atomically applies @asm_op to v->counter
 * and returns the value the counter held *before* the modification
 * (the OP-RETURN variant above returns the value *after*).
 *
 * @asm_con is the inline-asm constraint for the operand @i (e.g. "r"
 * for a register, "rKs21" for the sub instantiation below — presumably
 * register-or-21-bit-signed-constant; confirm against the avr32 GCC
 * machine-constraint docs).
 *
 * The loop is a load-locked/store-conditional retry sequence:
 *   - ld.w loads the current counter into %0 (the returned old value),
 *   - mov copies it to %1 and asm_op modifies that copy,
 *   - stcond stores %1 back only if the reservation still holds,
 *   - brne retries from 1: on a failed conditional store.
 * NOTE(review): "ssrf 5" appears to arm the lock flag that stcond
 * tests — verify against the AVR32 architecture manual.
 */
#define ATOMIC_FETCH_OP(op, asm_op, asm_con)				\
static inline int __atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int result, val;						\
									\
	asm volatile(							\
		"/* atomic_fetch_" #op " */\n"				\
		"1:	ssrf	5\n"					\
		"	ld.w	%0, %3\n"				\
		"	mov	%1, %0\n"				\
		"	" #asm_op "	%1, %4\n"			\
		"	stcond	%2, %1\n"				\
		"	brne	1b"					\
		: "=&r" (result), "=&r" (val), "=o" (v->counter)	\
		: "m" (v->counter), #asm_con (i)			\
		: "cc");						\
									\
	return result;							\
}

/*
 * Instantiate add/sub.  sub gets the "rKs21" constraint while add gets
 * plain "r"; the atomic_fetch_add()/atomic_fetch_sub() wrappers further
 * down use IS_21BIT_CONST() to route a constant through whichever
 * encoding fits (adding -i via sub, or subtracting -i via add).
 */
ATOMIC_OP_RETURN(sub, sub, rKs21)
ATOMIC_OP_RETURN(add, add, r)
ATOMIC_FETCH_OP (sub, sub, rKs21)
ATOMIC_FETCH_OP (add, add, r)

/*
 * NOTE(review): self-referential define — presumably signals to the
 * generic atomic headers that this arch supplies its own
 * atomic_fetch_or(); confirm against include/linux/atomic.h.
 */
#define atomic_fetch_or atomic_fetch_or

/*
 * NOTE(review): this span is a unified-diff rendering — the ATOMIC_OP
 * line immediately below is the pre-patch macro name that ATOMIC_OPS
 * replaces (see the +51/-5 hunk header above); only ATOMIC_OPS exists
 * post-patch.
 *
 * ATOMIC_OPS(op, asm_op) - for ops with no immediate-operand trickery,
 * generate the whole family: __atomic_<op>_return() and
 * __atomic_fetch_<op>() via the builder macros (register constraint
 * "r"), plus the public void atomic_<op>() and int atomic_fetch_<op>()
 * wrappers.
 */
#define ATOMIC_OP(op, asm_op)						\
#define ATOMIC_OPS(op, asm_op)						\
ATOMIC_OP_RETURN(op, asm_op, r)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	(void)__atomic_##op##_return(i, v);				\
}									\
ATOMIC_FETCH_OP(op, asm_op, r)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	return __atomic_fetch_##op(i, v);				\
}

/*
 * NOTE(review): diff rendering again — the three ATOMIC_OP() lines and
 * the "#undef ATOMIC_OP" are the pre-patch text; post-patch only the
 * ATOMIC_OPS() instantiations and the ATOMIC_OPS/ATOMIC_FETCH_OP/
 * ATOMIC_OP_RETURN undefs remain.
 *
 * Instantiate the bitwise ops (note xor maps to the "eor" mnemonic),
 * then drop the builder macros so they don't leak out of this header.
 */
ATOMIC_OP(and, and)
ATOMIC_OP(or, or)
ATOMIC_OP(xor, eor)
ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, eor)

#undef ATOMIC_OP
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

/*
@@ -87,6 +117,14 @@ static inline int atomic_add_return(int i, atomic_t *v)
	return __atomic_add_return(i, v);
}

/*
 * atomic_fetch_add - atomically add @i to @v, returning the old value.
 *
 * When @i is a compile-time constant that fits the 21-bit immediate
 * encoding, add it by subtracting its negation so the rKs21-constrained
 * sub instantiation can fold it into the instruction; otherwise fall
 * back to the register-operand add.
 */
static inline int atomic_fetch_add(int i, atomic_t *v)
{
	return IS_21BIT_CONST(i) ? __atomic_fetch_sub(-i, v)
				 : __atomic_fetch_add(i, v);
}

/*
 * atomic_sub_return - subtract the atomic variable
 * @i: integer value to subtract
@@ -102,6 +140,14 @@ static inline int atomic_sub_return(int i, atomic_t *v)
	return __atomic_add_return(-i, v);
}

/*
 * atomic_fetch_sub - atomically subtract @i from @v, returning the old
 * value.
 *
 * A 21-bit-encodable constant goes straight to the immediate-form sub;
 * anything else is subtracted by adding its negation through the
 * register-operand add instantiation.
 */
static inline int atomic_fetch_sub(int i, atomic_t *v)
{
	return IS_21BIT_CONST(i) ? __atomic_fetch_sub(i, v)
				 : __atomic_fetch_add(-i, v);
}

/*
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t