
Commit 73ada370 authored by Peter Zijlstra, committed by Thomas Gleixner

h8300: Provide atomic_{or,xor,and}

Implement atomic logic ops -- atomic_{or,xor,and}

These will replace the atomic_{set,clear}_mask functions that are
available on some archs.

Also rework the atomic implementation in terms of CPP macros to avoid
the typical repetition -- I seem to have missed this arch the last
time around when I did that.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent b0d8003e
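The diff below defines the whole h8300 atomic API from two C-preprocessor templates. As a reading aid, here is a hand expansion of ATOMIC_OP_RETURN(add, +=) and ATOMIC_OP(or, |=); this expansion is illustrative only and is not part of the commit. h8300 is a uniprocessor architecture, so disabling interrupts around a plain C operation is enough to make it atomic:

/*
 * Hand expansion of ATOMIC_OP_RETURN(add, +=): token pasting builds the
 * function name, and c_op becomes the compound assignment in the body.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	h8300flags flags;
	int ret;

	flags = arch_local_irq_save();	/* IRQs off; h8300 is !SMP */
	ret = v->counter += i;
	arch_local_irq_restore(flags);
	return ret;
}

/* Hand expansion of ATOMIC_OP(or, |=): */
static inline void atomic_or(int i, atomic_t *v)
{
	h8300flags flags;

	flags = arch_local_irq_save();
	v->counter |= i;
	arch_local_irq_restore(flags);
}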
arch/h8300/include/asm/atomic.h  +40 −95
@@ -16,83 +16,54 @@
 
 #include <linux/kernel.h>
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	ret = v->counter += i;
-	arch_local_irq_restore(flags);
-	return ret;
+#define ATOMIC_OP_RETURN(op, c_op)				\
+static inline int atomic_##op##_return(int i, atomic_t *v)	\
+{								\
+	h8300flags flags;					\
+	int ret;						\
+								\
+	flags = arch_local_irq_save();				\
+	ret = v->counter c_op i;				\
+	arch_local_irq_restore(flags);				\
+	return ret;						\
 }
 
-#define atomic_add(i, v) atomic_add_return(i, v)
-#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	ret = v->counter -= i;
-	arch_local_irq_restore(flags);
-	return ret;
+#define ATOMIC_OP(op, c_op)					\
+static inline void atomic_##op(int i, atomic_t *v)		\
+{								\
+	h8300flags flags;					\
+								\
+	flags = arch_local_irq_save();				\
+	v->counter c_op i;					\
+	arch_local_irq_restore(flags);				\
 }
 
-#define atomic_sub(i, v) atomic_sub_return(i, v)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-static inline int atomic_inc_return(atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
+ATOMIC_OP_RETURN(add, +=)
+ATOMIC_OP_RETURN(sub, -=)
 
-	flags = arch_local_irq_save();
-	v->counter++;
-	ret = v->counter;
-	arch_local_irq_restore(flags);
-	return ret;
-}
+#define CONFIG_ARCH_HAS_ATOMIC_OR
 
-#define atomic_inc(v) atomic_inc_return(v)
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or,  |=)
+ATOMIC_OP(xor, ^=)
 
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
-static inline int atomic_dec_return(atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
+#define atomic_add(i, v)		(void)atomic_add_return(i, v)
+#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
 
-	flags = arch_local_irq_save();
-	--v->counter;
-	ret = v->counter;
-	arch_local_irq_restore(flags);
-	return ret;
-}
+#define atomic_sub(i, v)		(void)atomic_sub_return(i, v)
+#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)
 
-#define atomic_dec(v) atomic_dec_return(v)
+#define atomic_inc_return(v)		atomic_add_return(1, v)
+#define atomic_dec_return(v)		atomic_sub_return(1, v)
 
-static inline int atomic_dec_and_test(atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
+#define atomic_inc(v)			(void)atomic_inc_return(v)
+#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
 
-	flags = arch_local_irq_save();
-	--v->counter;
-	ret = v->counter;
-	arch_local_irq_restore(flags);
-	return ret == 0;
-}
+#define atomic_dec(v)			(void)atomic_dec_return(v)
+#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -120,40 +91,14 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return ret;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
-	unsigned char ccr;
-	unsigned long tmp;
-
-	__asm__ __volatile__("stc ccr,%w3\n\t"
-			     "orc #0x80,ccr\n\t"
-			     "mov.l %0,%1\n\t"
-			     "and.l %2,%1\n\t"
-			     "mov.l %1,%0\n\t"
-			     "ldc %w3,ccr"
-			     : "=m"(*v), "=r"(tmp)
-			     : "g"(~(mask)), "r"(ccr));
+	atomic_and(~mask, v);
 }
 
-static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
-	unsigned char ccr;
-	unsigned long tmp;
-
-	__asm__ __volatile__("stc ccr,%w3\n\t"
-			     "orc #0x80,ccr\n\t"
-			     "mov.l %0,%1\n\t"
-			     "or.l %2,%1\n\t"
-			     "mov.l %1,%0\n\t"
-			     "ldc %w3,ccr"
-			     : "=m"(*v), "=r"(tmp)
-			     : "g"(~(mask)), "r"(ccr));
+	atomic_or(mask, v);
 }
 
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()    barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc()    barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #endif /* __ARCH_H8300_ATOMIC __ */
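For callers, the migration the changelog describes looks roughly like this; a sketch only, where the status variable and bit masks are hypothetical examples and not taken from the commit:

#include <linux/atomic.h>

#define RX_PENDING	0x01
#define TX_DONE		0x02

static atomic_t status = ATOMIC_INIT(0);

static void example(void)
{
	/* Old interface, now a __deprecated wrapper around the logic ops: */
	atomic_set_mask(RX_PENDING, &status);	/* set bits   */
	atomic_clear_mask(TX_DONE, &status);	/* clear bits */

	/* New generic logic ops this commit provides: */
	atomic_or(RX_PENDING, &status);		/* set bits   */
	atomic_and(~TX_DONE, &status);		/* clear bits */
}

Note that atomic_clear_mask(mask, v) clears the bits in mask, so its replacement inverts the mask before the AND, exactly as the deprecated wrapper in the diff does.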