
Commit 31a8394e authored by Jeremy Fitzhardinge

x86: consolidate xchg and xadd macros



They both have a basic "put new value in location, return old value"
pattern, so they can use the same macro easily.

Signed-off-by: Jeremy Fitzhardinge <jeremy@goop.org>
parent 3d94ae0c
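
The consolidation relies on the property the commit message calls out: both xchg and xadd write a new value into the memory location and hand back whatever was there before. Below is a minimal user-space sketch of that shared pattern (x86-only and purely illustrative: demo_xchg, demo_xadd and the standalone program are made up for this page; they are not kernel code and not the macros in the diff).

#include <stdint.h>
#include <stdio.h>

/* Swap val into *ptr, return the previous *ptr.
 * xchg with a memory operand is implicitly locked, so no "lock" prefix. */
static inline uint32_t demo_xchg(volatile uint32_t *ptr, uint32_t val)
{
	asm volatile("xchgl %0, %1"
		     : "+r" (val), "+m" (*ptr)
		     : : "memory");
	return val;		/* the register now holds the old value */
}

/* Add inc to *ptr, return the previous *ptr.
 * xadd needs an explicit "lock" prefix to be atomic on SMP. */
static inline uint32_t demo_xadd(volatile uint32_t *ptr, uint32_t inc)
{
	asm volatile("lock; xaddl %0, %1"
		     : "+r" (inc), "+m" (*ptr)
		     : : "memory", "cc");
	return inc;		/* the register now holds the old value */
}

int main(void)
{
	volatile uint32_t v = 10;
	uint32_t old;

	old = demo_xchg(&v, 42);
	printf("xchg: old=%u, v=%u\n", old, v);	/* old=10, v=42 */

	old = demo_xadd(&v, 5);
	printf("xadd: old=%u, v=%u\n", old, v);	/* old=42, v=47 */
	return 0;
}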
+36 −78
@@ -34,59 +34,46 @@ extern void __add_wrong_size(void)
 #endif
 
 /*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
- * Since this is generally used to protect other memory information, we
- * use "asm volatile" and "memory" clobbers to prevent gcc from moving
- * information around.
+ * An exchange-type operation, which takes a value and a pointer, and
+ * returns the old value.
  */
-#define __xchg(x, ptr, size)						\
+#define __xchg_op(ptr, arg, op, lock)					\
 	({								\
-	__typeof(*(ptr)) __x = (x);					\
-	switch (size) {							\
+	        __typeof__ (*(ptr)) __ret = (arg);			\
+		switch (sizeof(*(ptr))) {				\
 		case __X86_CASE_B:					\
-	{								\
-		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
-		asm volatile("xchgb %0,%1"				\
-			     : "=q" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
+			asm volatile (lock #op "b %b0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
 			break;						\
-	}								\
 		case __X86_CASE_W:					\
-	{								\
-		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
-		asm volatile("xchgw %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
+			asm volatile (lock #op "w %w0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
 			break;						\
-	}								\
 		case __X86_CASE_L:					\
-	{								\
-		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
-		asm volatile("xchgl %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
+			asm volatile (lock #op "l %0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
 			break;						\
-	}								\
 		case __X86_CASE_Q:					\
-	{								\
-		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
-		asm volatile("xchgq %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
+			asm volatile (lock #op "q %q0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
 			break;						\
-	}								\
 		default:						\
-		__xchg_wrong_size();					\
+			__ ## op ## _wrong_size();			\
 		}							\
-	__x;								\
+		__ret;							\
 	})
 
-#define xchg(ptr, v)							\
-	__xchg((v), (ptr), sizeof(*ptr))
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
+ * Since this is generally used to protect other memory information, we
+ * use "asm volatile" and "memory" clobbers to prevent gcc from moving
+ * information around.
+ */
+#define xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
 
 /*
  * Atomic compare and exchange.  Compare OLD with MEM, if identical,
@@ -167,36 +154,6 @@ extern void __add_wrong_size(void)
 	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
 #endif
 
-#define __xadd(ptr, inc, lock)						\
-	({								\
-	        __typeof__ (*(ptr)) __ret = (inc);			\
-		switch (sizeof(*(ptr))) {				\
-		case __X86_CASE_B:					\
-			asm volatile (lock "xaddb %b0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
-			break;						\
-		case __X86_CASE_W:					\
-			asm volatile (lock "xaddw %w0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
-			break;						\
-		case __X86_CASE_L:					\
-			asm volatile (lock "xaddl %0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
-			break;						\
-		case __X86_CASE_Q:					\
-			asm volatile (lock "xaddq %q0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
-			break;						\
-		default:						\
-			__xadd_wrong_size();				\
-		}							\
-		__ret;							\
-	})
-
 /*
  * xadd() adds "inc" to "*ptr" and atomically returns the previous
  * value of "*ptr".
@@ -205,6 +162,7 @@ extern void __add_wrong_size(void)
  * xadd_sync() is always locked
  * xadd_local() is never locked
  */
+#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
 #define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
 #define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
 #define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
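
As a reading aid, here is a hand expansion (a sketch, not actual preprocessor output) of the consolidated macro for a 4-byte object; the only assumption beyond the diff itself is that LOCK_PREFIX supplies the "lock" prefix on SMP builds.

/*
 * Hand expansion for a 4-byte *ptr (sketch only):
 *
 *   xchg(ptr, v) -> __xchg_op((ptr), (v), xchg, "")
 *     -> asm volatile ("" "xchg" "l %0, %1\n"
 *                      : "+r" (__ret), "+m" (*(ptr)) : : "memory", "cc");
 *
 *   xadd(ptr, i) -> __xadd((ptr), (i), LOCK_PREFIX)
 *     -> __xchg_op((ptr), (i), xadd, LOCK_PREFIX)
 *     -> asm volatile (LOCK_PREFIX "xadd" "l %0, %1\n"
 *                      : "+r" (__ret), "+m" (*(ptr)) : : "memory", "cc");
 *
 * In both cases __ret starts out holding the new value (or the increment),
 * the instruction writes it into *ptr, and __ret comes back holding the old
 * *ptr, which the statement expression then yields as the macro's result.
 */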