
Commit d6dfe250 authored by Peter Zijlstra, committed by Ingo Molnar

locking,arch,metag: Fold atomic_ops



Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.
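
As a reading aid (not part of this patch): a minimal, self-contained sketch of the token-pasting pattern the patch introduces, using a hypothetical my_atomic_t and a plain, non-atomic body purely to show how one macro generates both the add and sub variants. The real patch puts the metag LNKGET/LNKSET asm loop (or the lock1 global lock) inside the macro body instead.

#include <stdio.h>

typedef struct { int counter; } my_atomic_t;	/* stand-in for atomic_t */

/* One macro body, parameterised by the op name and its C operator. */
#define MY_ATOMIC_OP(op, c_op)						\
static inline void my_atomic_##op(int i, my_atomic_t *v)		\
{									\
	v->counter c_op i;		/* op-specific part */		\
}

#define MY_ATOMIC_OP_RETURN(op, c_op)					\
static inline int my_atomic_##op##_return(int i, my_atomic_t *v)	\
{									\
	v->counter c_op i;						\
	return v->counter;						\
}

#define MY_ATOMIC_OPS(op, c_op) MY_ATOMIC_OP(op, c_op) MY_ATOMIC_OP_RETURN(op, c_op)

MY_ATOMIC_OPS(add, +=)	/* emits my_atomic_add() and my_atomic_add_return() */
MY_ATOMIC_OPS(sub, -=)	/* emits my_atomic_sub() and my_atomic_sub_return() */

#undef MY_ATOMIC_OPS
#undef MY_ATOMIC_OP_RETURN
#undef MY_ATOMIC_OP

int main(void)
{
	my_atomic_t v = { 40 };

	printf("%d\n", my_atomic_add_return(2, &v));	/* 42 */
	my_atomic_sub(2, &v);
	printf("%d\n", v.counter);			/* 40 */
	return 0;
}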

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: James Hogan <james.hogan@imgtec.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-metag@vger.kernel.org
Link: http://lkml.kernel.org/r/20140508135852.453864110@infradead.org


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent d839bae4
+46 −75
@@ -27,85 +27,56 @@ static inline int atomic_read(const atomic_t *v)
	return temp;
}

static inline void atomic_add(int i, atomic_t *v)
{
	int temp;

	asm volatile (
		"1:	LNKGETD %0, [%1]\n"
		"	ADD	%0, %0, %2\n"
		"	LNKSETD [%1], %0\n"
		"	DEFR	%0, TXSTAT\n"
		"	ANDT	%0, %0, #HI(0x3f000000)\n"
		"	CMPT	%0, #HI(0x02000000)\n"
		"	BNZ	1b\n"
		: "=&d" (temp)
		: "da" (&v->counter), "bd" (i)
		: "cc");
#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	int temp;							\
									\
	asm volatile (							\
		"1:	LNKGETD %0, [%1]\n"				\
		"	" #op "	%0, %0, %2\n"				\
		"	LNKSETD [%1], %0\n"				\
		"	DEFR	%0, TXSTAT\n"				\
		"	ANDT	%0, %0, #HI(0x3f000000)\n"		\
		"	CMPT	%0, #HI(0x02000000)\n"			\
		"	BNZ	1b\n"					\
		: "=&d" (temp)						\
		: "da" (&v->counter), "bd" (i)				\
		: "cc");						\
}									\

#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int result, temp;						\
									\
	smp_mb();							\
									\
	asm volatile (							\
		"1:	LNKGETD %1, [%2]\n"				\
		"	" #op "	%1, %1, %3\n"				\
		"	LNKSETD [%2], %1\n"				\
		"	DEFR	%0, TXSTAT\n"				\
		"	ANDT	%0, %0, #HI(0x3f000000)\n"		\
		"	CMPT	%0, #HI(0x02000000)\n"			\
		"	BNZ 1b\n"					\
		: "=&d" (temp), "=&da" (result)				\
		: "da" (&v->counter), "bd" (i)				\
		: "cc");						\
									\
	smp_mb();							\
									\
	return result;							\
}

static inline void atomic_sub(int i, atomic_t *v)
{
	int temp;
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)

	asm volatile (
		"1:	LNKGETD %0, [%1]\n"
		"	SUB	%0, %0, %2\n"
		"	LNKSETD [%1], %0\n"
		"	DEFR	%0, TXSTAT\n"
		"	ANDT	%0, %0, #HI(0x3f000000)\n"
		"	CMPT	%0, #HI(0x02000000)\n"
		"	BNZ 1b\n"
		: "=&d" (temp)
		: "da" (&v->counter), "bd" (i)
		: "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	int result, temp;
ATOMIC_OPS(add)
ATOMIC_OPS(sub)

	smp_mb();

	asm volatile (
		"1:	LNKGETD %1, [%2]\n"
		"	ADD	%1, %1, %3\n"
		"	LNKSETD [%2], %1\n"
		"	DEFR	%0, TXSTAT\n"
		"	ANDT	%0, %0, #HI(0x3f000000)\n"
		"	CMPT	%0, #HI(0x02000000)\n"
		"	BNZ 1b\n"
		: "=&d" (temp), "=&da" (result)
		: "da" (&v->counter), "bd" (i)
		: "cc");

	smp_mb();

	return result;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int result, temp;

	smp_mb();

	asm volatile (
		"1:	LNKGETD %1, [%2]\n"
		"	SUB	%1, %1, %3\n"
		"	LNKSETD [%2], %1\n"
		"	DEFR	%0, TXSTAT\n"
		"	ANDT	%0, %0, #HI(0x3f000000)\n"
		"	CMPT	%0, #HI(0x02000000)\n"
		"	BNZ	1b\n"
		: "=&d" (temp), "=&da" (result)
		: "da" (&v->counter), "bd" (i)
		: "cc");

	smp_mb();

	return result;
}
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
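
For readers expanding the macros by hand: ATOMIC_OP(add) above regenerates essentially the open-coded atomic_add() that this hunk removes, with #op pasting the mnemonic into the asm string. A reconstructed expansion (not compiler output; note the mnemonic comes out in lower case, which the macro relies on the assembler to accept):

static inline void atomic_add(int i, atomic_t *v)
{
	int temp;

	asm volatile (
		"1:	LNKGETD %0, [%1]\n"
		"	add	%0, %0, %2\n"
		"	LNKSETD [%1], %0\n"
		"	DEFR	%0, TXSTAT\n"
		"	ANDT	%0, %0, #HI(0x3f000000)\n"
		"	CMPT	%0, #HI(0x02000000)\n"
		"	BNZ	1b\n"
		: "=&d" (temp)
		: "da" (&v->counter), "bd" (i)
		: "cc");
}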
+31 −45
@@ -37,55 +37,41 @@ static inline int atomic_set(atomic_t *v, int i)
	return i;
}

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	__global_lock1(flags);
	fence();
	v->counter += i;
	__global_unlock1(flags);
#define ATOMIC_OP(op, c_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	__global_lock1(flags);						\
	fence();							\
	v->counter c_op i;						\
	__global_unlock1(flags);					\
}									\

#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long result;						\
	unsigned long flags;						\
									\
	__global_lock1(flags);						\
	result = v->counter;						\
	result c_op i;							\
	fence();							\
	v->counter = result;						\
	__global_unlock1(flags);					\
									\
	return result;							\
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long flags;
#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)

	__global_lock1(flags);
	fence();
	v->counter -= i;
	__global_unlock1(flags);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long result;
	unsigned long flags;
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

	__global_lock1(flags);
	result = v->counter;
	result += i;
	fence();
	v->counter = result;
	__global_unlock1(flags);

	return result;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long result;
	unsigned long flags;

	__global_lock1(flags);
	result = v->counter;
	result -= i;
	fence();
	v->counter = result;
	__global_unlock1(flags);

	return result;
}
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
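
Likewise for the lock-based variant: expanding ATOMIC_OP_RETURN(sub, -=) above by hand gives back essentially the removed atomic_sub_return(), with the C operator substituted for c_op (reconstructed from the macro body for reading convenience):

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long result;
	unsigned long flags;

	__global_lock1(flags);
	result = v->counter;
	result -= i;
	fence();
	v->counter = result;
	__global_unlock1(flags);

	return result;
}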