Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7fc1845d authored by Peter Zijlstra, committed by Thomas Gleixner
Browse files

x86: Provide atomic_{or,xor,and}



Implement atomic logic ops -- atomic_{or,xor,and}.

These will replace the atomic_{set,clear}_mask functions that are
available on some archs.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent ae8c35c8
Loading
Loading
Loading
Loading
+25 −8
Original line number Diff line number Diff line
@@ -182,6 +182,23 @@ static inline int atomic_xchg(atomic_t *v, int new)
	return xchg(&v->counter, new);
}

/*
 * ATOMIC_OP(op) - generate atomic_<op>(int i, atomic_t *v).
 *
 * Each generated function performs v->counter <op>= i as a single
 * lock-prefixed x86 instruction (#op"l" pastes to andl/orl/xorl).
 * Constraints: "+m" marks counter as an in-memory read-modify-write
 * operand; "ir" allows i as an immediate or register; the "memory"
 * clobber keeps the compiler from caching memory values across the op.
 */
#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	asm volatile(LOCK_PREFIX #op"l %1,%0"				\
			: "+m" (v->counter)				\
			: "ir" (i)					\
			: "memory");					\
}

/* Advertise to generic code that this arch supplies atomic_or() etc. */
#define CONFIG_ARCH_HAS_ATOMIC_OR

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

/* Generator no longer needed past this point. */
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
@@ -219,15 +236,15 @@ static __always_inline short int atomic_inc_short(short int *v)
	return *v;
}

/* These are x86-specific, used by some header files */
/*
 * NOTE(review): this hunk is a diff rendered without +/- markers.
 * The macro forms below are the OLD implementations removed by this
 * commit; the __deprecated static inline wrappers that follow are
 * their replacements, now built on atomic_and()/atomic_or().
 */
#define atomic_clear_mask(mask, addr)				\
	asm volatile(LOCK_PREFIX "andl %0,%1"			\
		     : : "r" (~(mask)), "m" (*(addr)) : "memory")
/* New: clear the bits in @mask from @v (kept only for legacy callers). */
static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_and(~mask, v);
}

#define atomic_set_mask(mask, addr)				\
	asm volatile(LOCK_PREFIX "orl %0,%1"			\
		     : : "r" ((unsigned)(mask)), "m" (*(addr))	\
		     : "memory")
/* New: set the bits in @mask in @v (kept only for legacy callers). */
static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_or(mask, v);
}

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
+14 −0
Original line number Diff line number Diff line
@@ -313,4 +313,18 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
#undef alternative_atomic64
#undef __alternative_atomic64

/*
 * ATOMIC64_OP(op, c_op) - generate atomic64_<op>(long long, atomic64_t *)
 * for 32-bit x86 by emulating the 64-bit read-modify-write with a
 * compare-and-swap retry loop: keep trying until the value observed
 * by atomic64_cmpxchg() matches the value we based our update on.
 */
#define ATOMIC64_OP(op, c_op)						\
static inline void atomic64_##op(long long i, atomic64_t *v)		\
{									\
	long long expected = 0;						\
	for (;;) {							\
		long long seen;						\
		seen = atomic64_cmpxchg(v, expected, expected c_op i);	\
		if (seen == expected)					\
			break;						\
		expected = seen;	/* retry with the fresh value */\
	}								\
}

ATOMIC64_OP(and, &)
ATOMIC64_OP(or, |)
ATOMIC64_OP(xor, ^)

/* Generator no longer needed past this point. */
#undef ATOMIC64_OP

#endif /* _ASM_X86_ATOMIC64_32_H */
+15 −0
Original line number Diff line number Diff line
@@ -220,4 +220,19 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
	return dec;
}

/*
 * ATOMIC64_OP(op) - generate atomic64_<op>(long i, atomic64_t *v)
 * for 64-bit x86 as a single lock-prefixed quadword instruction
 * (#op"q" pastes to andq/orq/xorq).  "er" allows i as a
 * sign-extended 32-bit immediate or a register; "+m" and the
 * "memory" clobber mirror the 32-bit ATOMIC_OP() generator.
 */
#define ATOMIC64_OP(op)							\
static inline void atomic64_##op(long i, atomic64_t *v)			\
{									\
	asm volatile(LOCK_PREFIX #op"q %1,%0"				\
			: "+m" (v->counter)				\
			: "er" (i)					\
			: "memory");					\
}

ATOMIC64_OP(and)
ATOMIC64_OP(or)
ATOMIC64_OP(xor)

/* Generator no longer needed past this point. */
#undef ATOMIC64_OP

#endif /* _ASM_X86_ATOMIC64_64_H */