Commit b7271b9f authored by Peter Zijlstra, committed by Ingo Molnar

locking/atomic, arch/tile: Fix tilepro build



It seems the tilepro change was never compiled (the 0day build bot
doesn't have a toolchain for it either).

Make it work.

What makes the patch bigger than desired is a namespace collision
with the C11 __atomic builtin functions. So rename the tilepro
functions to use an __atomic32 prefix.
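
For reference, GCC reserves the __atomic_* namespace for its C11-style
builtins, so a function named, say, __atomic_fetch_or collides with the
builtin of the same name. A minimal user-space sketch (not kernel code)
of the builtin the old tilepro names clashed with:

#include <stdio.h>

int main(void)
{
	int v = 0x3;

	/* GCC builtin: atomically OR in a mask and return the old value.
	 * The old tilepro helper reused this reserved name with an
	 * incompatible signature, which the compiler rejects.
	 */
	int old = __atomic_fetch_or(&v, 0x4, __ATOMIC_SEQ_CST);

	printf("old=%#x new=%#x\n", old, v);	/* old=0x3 new=0x7 */
	return 0;
}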

Reported-by: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: 1af5de9a ("locking/atomic, arch/tile: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()")
Link: http://lkml.kernel.org/r/20160622091649.GB30154@twins.programming.kicks-ass.net


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 86a664d5
+12 −12
@@ -143,15 +143,15 @@ static inline void atomic64_##op(long long i, atomic64_t *v) \
{								\
	_atomic64_fetch_##op(&v->counter, i);			\
}								\
-static inline void atomic64_##op(long long i, atomic64_t *v)	\
+static inline long long atomic64_fetch_##op(long long i, atomic64_t *v)	\
{								\
	smp_mb();						\
	return _atomic64_fetch_##op(&v->counter, i);		\
}

-ATOMIC64_OP(and)
-ATOMIC64_OP(or)
-ATOMIC64_OP(xor)
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS
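
The hunk above fixes two problems in the earlier commit's ATOMIC64_OPS()
macro: its second function repeated the void atomic64_##op() signature,
both redefining the first function and returning a value from a void
function, so it could not compile. After the fix, ATOMIC64_OPS(and)
expands to roughly this pair (a sketch reconstructed from the hunk):

static inline void atomic64_and(long long i, atomic64_t *v)
{
	_atomic64_fetch_and(&v->counter, i);
}

static inline long long atomic64_fetch_and(long long i, atomic64_t *v)
{
	smp_mb();	/* full barrier, then the read-modify-write */
	return _atomic64_fetch_and(&v->counter, i);
}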

@@ -266,16 +266,16 @@ struct __get_user {
	unsigned long val;
	int err;
};
-extern struct __get_user __atomic_cmpxchg(volatile int *p,
+extern struct __get_user __atomic32_cmpxchg(volatile int *p,
					  int *lock, int o, int n);
-extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
+extern struct __get_user __atomic32_xchg(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_xchg_add(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_xchg_add_unless(volatile int *p,
						  int *lock, int o, int n);
-extern struct __get_user __atomic_fetch_or(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_fetch_and(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_fetch_andn(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_fetch_xor(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_fetch_or(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_fetch_and(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_fetch_andn(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_fetch_xor(volatile int *p, int *lock, int n);
extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
					long long o, long long n);
extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
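
These helpers hand back the loaded value and a fault indication together
in struct __get_user. A hypothetical wrapper (the name
do_atomic32_cmpxchg() is mine, not from the tree) showing how a caller
unpacks both fields, as the futex code below does:

static inline int do_atomic32_cmpxchg(volatile int *p, int *lock,
				      int o, int n, int *out)
{
	struct __get_user gu = __atomic32_cmpxchg(p, lock, o, n);

	*out = gu.val;	/* the value that was read */
	return gu.err;	/* assumption: nonzero on a faulting access */
}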
+7 −7
@@ -80,15 +80,15 @@
		ret = gu.err;						\
	}

-#define __futex_set() __futex_call(__atomic_xchg)
-#define __futex_add() __futex_call(__atomic_xchg_add)
-#define __futex_or() __futex_call(__atomic_or)
-#define __futex_andn() __futex_call(__atomic_andn)
-#define __futex_xor() __futex_call(__atomic_xor)
+#define __futex_set() __futex_call(__atomic32_xchg)
+#define __futex_add() __futex_call(__atomic32_xchg_add)
+#define __futex_or() __futex_call(__atomic32_fetch_or)
+#define __futex_andn() __futex_call(__atomic32_fetch_andn)
+#define __futex_xor() __futex_call(__atomic32_fetch_xor)

#define __futex_cmpxchg()						\
	{								\
-		struct __get_user gu = __atomic_cmpxchg((u32 __force *)uaddr, \
+		struct __get_user gu = __atomic32_cmpxchg((u32 __force *)uaddr, \
							  lock, oldval, oparg); \
		val = gu.val;						\
		ret = gu.err;						\
+8 −8
@@ -61,13 +61,13 @@ static inline int *__atomic_setup(volatile void *v)

int _atomic_xchg(int *v, int n)
{
-	return __atomic_xchg(v, __atomic_setup(v), n).val;
+	return __atomic32_xchg(v, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);

int _atomic_xchg_add(int *v, int i)
{
-	return __atomic_xchg_add(v, __atomic_setup(v), i).val;
+	return __atomic32_xchg_add(v, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);

@@ -78,37 +78,37 @@ int _atomic_xchg_add_unless(int *v, int a, int u)
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
-	return __atomic_xchg_add_unless(v, __atomic_setup(v), u, a).val;
+	return __atomic32_xchg_add_unless(v, __atomic_setup(v), u, a).val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);

int _atomic_cmpxchg(int *v, int o, int n)
{
-	return __atomic_cmpxchg(v, __atomic_setup(v), o, n).val;
+	return __atomic32_cmpxchg(v, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);

unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask)
{
-	return __atomic_fetch_or((int *)p, __atomic_setup(p), mask).val;
+	return __atomic32_fetch_or((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_or);

unsigned long _atomic_fetch_and(volatile unsigned long *p, unsigned long mask)
{
-	return __atomic_fetch_and((int *)p, __atomic_setup(p), mask).val;
+	return __atomic32_fetch_and((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_and);

unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask)
{
-	return __atomic_fetch_andn((int *)p, __atomic_setup(p), mask).val;
+	return __atomic32_fetch_andn((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_andn);

unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask)
{
-	return __atomic_fetch_xor((int *)p, __atomic_setup(p), mask).val;
+	return __atomic32_fetch_xor((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_xor);
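
Each wrapper above passes __atomic_setup(v), an int * lock derived from
the target address (see the signature in the hunk header), into the
out-of-line helper. A portable C11 sketch of that shape, with invented
names (lock_for(), emulated_fetch_or()) and an assumed table size, not
the real tile implementation:

#include <stdatomic.h>

#define NR_LOCKS 128	/* assumption: arbitrary power-of-two table */

static atomic_flag locks[NR_LOCKS];	/* zero state is the clear state here */

/* Map a data address to a lock slot, standing in for __atomic_setup(). */
static atomic_flag *lock_for(volatile void *v)
{
	return &locks[((unsigned long)v >> 2) % NR_LOCKS];
}

/* Same shape as __atomic32_fetch_or(p, lock, mask): do the
 * read-modify-write under the per-address lock, return the old value.
 */
static int emulated_fetch_or(volatile int *p, int mask)
{
	atomic_flag *lock = lock_for(p);
	int old;

	while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire))
		;	/* spin until the lock is ours */
	old = *p;
	*p = old | mask;
	atomic_flag_clear_explicit(lock, memory_order_release);
	return old;
}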

+13 −8
@@ -172,15 +172,20 @@ STD_ENTRY_SECTION(__atomic\name, .text.atomic)
	.endif
	.endm

-atomic_op _cmpxchg, 32, "seq r26, r22, r2; { bbns r26, 3f; move r24, r3 }"
-atomic_op _xchg, 32, "move r24, r2"
-atomic_op _xchg_add, 32, "add r24, r22, r2"
-atomic_op _xchg_add_unless, 32, \
+
+/*
+ * Use __atomic32 prefix to avoid collisions with GCC builtin __atomic functions.
+ */
+
+atomic_op 32_cmpxchg, 32, "seq r26, r22, r2; { bbns r26, 3f; move r24, r3 }"
+atomic_op 32_xchg, 32, "move r24, r2"
+atomic_op 32_xchg_add, 32, "add r24, r22, r2"
+atomic_op 32_xchg_add_unless, 32, \
	"sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }"
-atomic_op _fetch_or, 32, "or r24, r22, r2"
-atomic_op _fetch_and, 32, "and r24, r22, r2"
-atomic_op _fetch_andn, 32, "nor r2, r2, zero; and r24, r22, r2"
-atomic_op _fetch_xor, 32, "xor r24, r22, r2"
+atomic_op 32_fetch_or, 32, "or r24, r22, r2"
+atomic_op 32_fetch_and, 32, "and r24, r22, r2"
+atomic_op 32_fetch_andn, 32, "nor r2, r2, zero; and r24, r22, r2"
+atomic_op 32_fetch_xor, 32, "xor r24, r22, r2"

atomic_op 64_cmpxchg, 64, "{ seq r26, r22, r2; seq r27, r23, r3 }; \
	{ bbns r26, 3f; move r24, r4 }; { bbns r27, 3f; move r25, r5 }"
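
The atomic_op assembler macro pastes its first argument onto __atomic
(the hunk header shows STD_ENTRY_SECTION(__atomic\name, ...)), so
atomic_op 32_cmpxchg now emits the symbol __atomic32_cmpxchg where
atomic_op _cmpxchg used to emit __atomic_cmpxchg; the 64-bit entries
already used this number-prefixed style. The C preprocessor equivalent
of that pasting, for illustration only:

#define ATOMIC_OP_DECL(name)	extern void __atomic##name(void);

ATOMIC_OP_DECL(32_cmpxchg)	/* declares __atomic32_cmpxchg() */
ATOMIC_OP_DECL(64_cmpxchg)	/* declares __atomic64_cmpxchg() */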