
Commit ba1c9f83 authored by Dmitry Vyukov, committed by Ingo Molnar

locking/atomic/x86: Un-macro-ify atomic ops implementation



CPP turns perfectly readable code into a much harder to read syntactic soup.

Ingo suggested writing them out as-is in C and ignoring the higher linecount.

Do this.

(As a side effect, plain C functions will be easier to KASAN-instrument as well.)
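
For a concrete sense of what the preprocessor indirection hides: the old ATOMIC_OPS(xor, ^) invocation expanded to essentially the explicit functions the patch now writes out by hand. One pair is shown below, reconstructed from the removed macros (whitespace differs slightly from the hand-written version in the diff); it is an illustration, not new code:

static inline void atomic_xor(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "xorl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

static inline int atomic_fetch_xor(int i, atomic_t *v)
{
	int val = atomic_read(v);

	do { } while (!atomic_try_cmpxchg(v, &val, val ^ i));

	return val;
}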

Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kasan-dev@googlegroups.com
Cc: will.deacon@arm.com
Link: http://lkml.kernel.org/r/a35b983dd3be937a3cf63c4e2db487de2cdc7b8f.1497690003.git.dvyukov@google.com


[ Beautified the C code some more and twiddled the changelog
  to mention the linecount increase and the KASAN benefit. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent fd25d19f
arch/x86/include/asm/atomic.h (+46 −23)

@@ -197,35 +197,56 @@ static inline int atomic_xchg(atomic_t *v, int new)
 	return xchg(&v->counter, new);
 }
 
-#define ATOMIC_OP(op)							\
-static inline void atomic_##op(int i, atomic_t *v)			\
-{									\
-	asm volatile(LOCK_PREFIX #op"l %1,%0"				\
-			: "+m" (v->counter)				\
-			: "ir" (i)					\
-			: "memory");					\
+static inline void atomic_and(int i, atomic_t *v)
+{
+	asm volatile(LOCK_PREFIX "andl %1,%0"
+			: "+m" (v->counter)
+			: "ir" (i)
+			: "memory");
+}
+
+static inline int atomic_fetch_and(int i, atomic_t *v)
+{
+	int val = atomic_read(v);
+
+	do { } while (!atomic_try_cmpxchg(v, &val, val & i));
+
+	return val;
+}
+
+static inline void atomic_or(int i, atomic_t *v)
+{
+	asm volatile(LOCK_PREFIX "orl %1,%0"
+			: "+m" (v->counter)
+			: "ir" (i)
+			: "memory");
 }
 
-#define ATOMIC_FETCH_OP(op, c_op)					\
-static inline int atomic_fetch_##op(int i, atomic_t *v)			\
-{									\
-	int val = atomic_read(v);					\
-	do {								\
-	} while (!atomic_try_cmpxchg(v, &val, val c_op i));		\
-	return val;							\
+static inline int atomic_fetch_or(int i, atomic_t *v)
+{
+	int val = atomic_read(v);
+
+	do { } while (!atomic_try_cmpxchg(v, &val, val | i));
+
+	return val;
 }
 
-#define ATOMIC_OPS(op, c_op)						\
-	ATOMIC_OP(op)							\
-	ATOMIC_FETCH_OP(op, c_op)
+static inline void atomic_xor(int i, atomic_t *v)
+{
+	asm volatile(LOCK_PREFIX "xorl %1,%0"
+			: "+m" (v->counter)
+			: "ir" (i)
+			: "memory");
+}
 
-ATOMIC_OPS(and, &)
-ATOMIC_OPS(or , |)
-ATOMIC_OPS(xor, ^)
+static inline int atomic_fetch_xor(int i, atomic_t *v)
+{
+	int val = atomic_read(v);
 
-#undef ATOMIC_OPS
-#undef ATOMIC_FETCH_OP
-#undef ATOMIC_OP
+	do { } while (!atomic_try_cmpxchg(v, &val, val ^ i));
+
+	return val;
+}
 
 /**
  * __atomic_add_unless - add unless the number is already a given value
@@ -239,10 +260,12 @@ ATOMIC_OPS(xor, ^)
 static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c = atomic_read(v);
+
 	do {
 		if (unlikely(c == u))
			break;
 	} while (!atomic_try_cmpxchg(v, &c, c + a));
+
 	return c;
 }
 


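The fetch_*() variants above are built on atomic_try_cmpxchg(): it attempts a compare-and-exchange and, on failure, writes the value it actually found back into the expected-value argument, so the loop picks up the fresh value without an explicit re-read. Below is a minimal user-space analogue of the same loop, using the GCC/Clang __atomic builtins instead of the kernel API; the function name and memory orders are illustrative assumptions, not part of the patch:

#include <stdbool.h>

/* User-space sketch of the atomic_fetch_and() retry loop. */
static int fetch_and_user(int *counter, int i)
{
	int val = __atomic_load_n(counter, __ATOMIC_RELAXED);

	/* On failure, __atomic_compare_exchange_n() writes the current
	 * value back into 'val', just like atomic_try_cmpxchg() does. */
	while (!__atomic_compare_exchange_n(counter, &val, val & i,
					    false, __ATOMIC_SEQ_CST,
					    __ATOMIC_RELAXED))
		;

	return val;	/* value observed before the AND */
}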
arch/x86/include/asm/atomic64_32.h (+57 −24)

@@ -312,37 +312,70 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 #undef alternative_atomic64
 #undef __alternative_atomic64
 
-#define ATOMIC64_OP(op, c_op)						\
-static inline void atomic64_##op(long long i, atomic64_t *v)		\
-{									\
-	long long old, c = 0;						\
-	while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c)		\
-		c = old;						\
+static inline void atomic64_and(long long i, atomic64_t *v)
+{
+	long long old, c = 0;
+
+	while ((old = atomic64_cmpxchg(v, c, c & i)) != c)
+		c = old;
 }
 
-#define ATOMIC64_FETCH_OP(op, c_op)					\
-static inline long long atomic64_fetch_##op(long long i, atomic64_t *v)	\
-{									\
-	long long old, c = 0;						\
-	while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c)		\
-		c = old;						\
-	return old;							\
+static inline long long atomic64_fetch_and(long long i, atomic64_t *v)
+{
+	long long old, c = 0;
+
+	while ((old = atomic64_cmpxchg(v, c, c & i)) != c)
+		c = old;
+
+	return old;
 }
 
-ATOMIC64_FETCH_OP(add, +)
+static inline void atomic64_or(long long i, atomic64_t *v)
+{
+	long long old, c = 0;
 
-#define atomic64_fetch_sub(i, v)	atomic64_fetch_add(-(i), (v))
+	while ((old = atomic64_cmpxchg(v, c, c | i)) != c)
+		c = old;
+}
+
+static inline long long atomic64_fetch_or(long long i, atomic64_t *v)
+{
+	long long old, c = 0;
+
+	while ((old = atomic64_cmpxchg(v, c, c | i)) != c)
+		c = old;
+
+	return old;
+}
 
-#define ATOMIC64_OPS(op, c_op)						\
-	ATOMIC64_OP(op, c_op)						\
-	ATOMIC64_FETCH_OP(op, c_op)
+static inline void atomic64_xor(long long i, atomic64_t *v)
+{
+	long long old, c = 0;
+
+	while ((old = atomic64_cmpxchg(v, c, c ^ i)) != c)
+		c = old;
+}
 
-ATOMIC64_OPS(and, &)
-ATOMIC64_OPS(or, |)
-ATOMIC64_OPS(xor, ^)
+static inline long long atomic64_fetch_xor(long long i, atomic64_t *v)
+{
+	long long old, c = 0;
+
+	while ((old = atomic64_cmpxchg(v, c, c ^ i)) != c)
+		c = old;
+
+	return old;
+}
 
-#undef ATOMIC64_OPS
-#undef ATOMIC64_FETCH_OP
-#undef ATOMIC64_OP
+static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
+{
+	long long old, c = 0;
+
+	while ((old = atomic64_cmpxchg(v, c, c + i)) != c)
+		c = old;
+
+	return old;
+}
+
+#define atomic64_fetch_sub(i, v)	atomic64_fetch_add(-(i), (v))
 
 #endif /* _ASM_X86_ATOMIC64_32_H */
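
Unlike the word-sized ops in atomic.h, 32-bit x86 has no single locked instruction for a 64-bit AND/OR/XOR on memory, so atomic64_32.h builds every operation from atomic64_cmpxchg() (ultimately cmpxchg8b). The same loop appears in each function added above; here is an annotated restatement of it, with '|' standing in for whichever bitwise op is wanted (the _sketch name is made up for illustration):

/* Sketch of the loop shared by the atomic64_*() ops above: start
 * from a guess (c = 0); if the compare-exchange fails, it returns
 * the value actually in memory, which becomes the next guess.
 * 'old' ends up holding the pre-update value. */
static inline long long atomic64_fetch_op_sketch(long long i, atomic64_t *v)
{
	long long old, c = 0;

	while ((old = atomic64_cmpxchg(v, c, c | i)) != c)
		c = old;

	return old;
}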
arch/x86/include/asm/atomic64_64.h (+44 −23)

@@ -226,34 +226,55 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 	return dec;
 }
 
-#define ATOMIC64_OP(op)							\
-static inline void atomic64_##op(long i, atomic64_t *v)			\
-{									\
-	asm volatile(LOCK_PREFIX #op"q %1,%0"				\
-			: "+m" (v->counter)				\
-			: "er" (i)					\
-			: "memory");					\
+static inline void atomic64_and(long i, atomic64_t *v)
+{
+	asm volatile(LOCK_PREFIX "andq %1,%0"
+			: "+m" (v->counter)
+			: "er" (i)
+			: "memory");
 }
 
-#define ATOMIC64_FETCH_OP(op, c_op)					\
-static inline long atomic64_fetch_##op(long i, atomic64_t *v)		\
-{									\
-	long val = atomic64_read(v);					\
-	do {								\
-	} while (!atomic64_try_cmpxchg(v, &val, val c_op i));		\
-	return val;							\
+static inline long atomic64_fetch_and(long i, atomic64_t *v)
+{
+	long val = atomic64_read(v);
+
+	do {
+	} while (!atomic64_try_cmpxchg(v, &val, val & i));
+	return val;
 }
 
-#define ATOMIC64_OPS(op, c_op)						\
-	ATOMIC64_OP(op)							\
-	ATOMIC64_FETCH_OP(op, c_op)
+static inline void atomic64_or(long i, atomic64_t *v)
+{
+	asm volatile(LOCK_PREFIX "orq %1,%0"
+			: "+m" (v->counter)
+			: "er" (i)
+			: "memory");
+}
 
-ATOMIC64_OPS(and, &)
-ATOMIC64_OPS(or, |)
-ATOMIC64_OPS(xor, ^)
+static inline long atomic64_fetch_or(long i, atomic64_t *v)
+{
+	long val = atomic64_read(v);
 
-#undef ATOMIC64_OPS
-#undef ATOMIC64_FETCH_OP
-#undef ATOMIC64_OP
+	do {
+	} while (!atomic64_try_cmpxchg(v, &val, val | i));
+	return val;
+}
+
+static inline void atomic64_xor(long i, atomic64_t *v)
+{
+	asm volatile(LOCK_PREFIX "xorq %1,%0"
+			: "+m" (v->counter)
+			: "er" (i)
+			: "memory");
+}
+
+static inline long atomic64_fetch_xor(long i, atomic64_t *v)
+{
+	long val = atomic64_read(v);
+
+	do {
+	} while (!atomic64_try_cmpxchg(v, &val, val ^ i));
+	return val;
+}
 
 #endif /* _ASM_X86_ATOMIC64_64_H */
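
Callers are unaffected by the conversion: the function names and semantics stay the same, only the macro machinery is gone. A hypothetical usage sketch of the bitwise helpers follows; the flag names and function are invented for illustration and assume kernel context via <linux/atomic.h>:

#include <linux/atomic.h>

/* Hypothetical flag bits, purely for illustration. */
#define MY_FLAG_BUSY	0x1
#define MY_FLAG_DIRTY	0x2

static atomic_t my_flags = ATOMIC_INIT(0);

static void flags_example(void)
{
	int old;

	atomic_or(MY_FLAG_BUSY, &my_flags);	/* set a bit */
	atomic_and(~MY_FLAG_DIRTY, &my_flags);	/* clear a bit */

	/* fetch_* variants return the value seen before the update. */
	old = atomic_fetch_or(MY_FLAG_DIRTY, &my_flags);
	(void)old;
}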