
Commit f7d11e93 authored by Peter Zijlstra, committed by Ingo Molnar

locking,arch,arc: Fold atomic_ops



Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Link: http://lkml.kernel.org/r/20140508135851.886055622@infradead.org


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent b93c7b8c
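
The folding relies on two standard CPP tricks visible in the hunks below: token pasting (atomic_##op) to synthesize the function names, and passing the per-op C operator (c_op) and assembly mnemonic (asm_op) as macro arguments so a single body serves every operation. A minimal standalone sketch of the pattern (userspace C, not the kernel code: the struct merely mimics atomic_t and nothing here is actually atomic):

#include <stdio.h>

typedef struct { int counter; } atomic_t;

/* One body, parameterized on the op name and its C operator. */
#define ATOMIC_OP(op, c_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	v->counter c_op i;	/* expands to +=, -=, &= ... */		\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	v->counter c_op i;						\
	return v->counter;						\
}

/* Ops that need both variants are stamped out in one line. */
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +=)	/* atomic_add() and atomic_add_return() */
ATOMIC_OPS(sub, -=)	/* atomic_sub() and atomic_sub_return() */
ATOMIC_OP(and, &=)	/* atomic_and() only */

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

int main(void)
{
	atomic_t v = { .counter = 10 };

	atomic_add(5, &v);				/* v = 15 */
	printf("%d\n", atomic_add_return(1, &v));	/* prints 16 */
	atomic_sub(6, &v);				/* v = 10 */
	printf("%d\n", v.counter);			/* prints 10 */
	return 0;
}

Adding a new op then costs one invocation line rather than one or two hand-written functions per variant, which is what "prepares for easy addition of new ops" refers to.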
arch/arc/include/asm/atomic.h: +63 −121
@@ -25,79 +25,36 @@

#define atomic_set(v, i) (((v)->counter) = (i))

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned int temp;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	add     %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b		\n"
	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */
	: "r"(&v->counter), "ir"(i)
	: "cc");
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned int temp;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	sub     %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b		\n"
	: "=&r"(temp)
	: "r"(&v->counter), "ir"(i)
	: "cc");
}

/* add and also return the new value */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned int temp;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	add     %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b		\n"
	: "=&r"(temp)
	: "r"(&v->counter), "ir"(i)
	: "cc");

	return temp;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned int temp;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	sub     %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b		\n"
	: "=&r"(temp)
	: "r"(&v->counter), "ir"(i)
	: "cc");

	return temp;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned int temp;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	bic     %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b		\n"
	: "=&r"(temp)
	: "r"(addr), "ir"(mask)
	: "cc");
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int temp;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %0, [%1]	\n"				\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
	: "r"(&v->counter), "ir"(i)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %0, [%1]	\n"				\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(temp)							\
	: "r"(&v->counter), "ir"(i)					\
	: "cc");							\
									\
	return temp;							\
}

#else	/* !CONFIG_ARC_HAS_LLSC */
@@ -126,6 +83,7 @@ static inline void atomic_set(atomic_t *v, int i)
	v->counter = i;
	atomic_ops_unlock(flags);
}

#endif

/*
@@ -133,62 +91,46 @@ static inline void atomic_set(atomic_t *v, int i)
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter += i;
	atomic_ops_unlock(flags);
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter -= i;
	atomic_ops_unlock(flags);
#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	unsigned long temp;

	atomic_ops_lock(flags);
	temp = v->counter;
	temp += i;
	v->counter = temp;
	atomic_ops_unlock(flags);

	return temp;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	unsigned long temp;

	atomic_ops_lock(flags);
	temp = v->counter;
	temp -= i;
	v->counter = temp;
	atomic_ops_unlock(flags);
#endif /* !CONFIG_ARC_HAS_LLSC */

	return temp;
}
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
ATOMIC_OP(and, &=, and)

	atomic_ops_lock(flags);
	*addr &= ~mask;
	atomic_ops_unlock(flags);
}
#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))

#endif /* !CONFIG_ARC_HAS_LLSC */
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
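
For reference, the conventional contract behind that kernel-doc line (generic kernel semantics of the era, sketched here as a plain non-atomic model rather than the ARC implementation): __atomic_add_unless(v, a, u) atomically adds a to v so long as v was not u, and returns the old value, letting the caller tell whether the add happened.

static inline int model_atomic_add_unless(int *v, int a, int u)
{
	int old = *v;

	if (old != u)		/* only add while the value is not u */
		*v = old + a;
	return old;		/* old != u means the add took place */
}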