
Commit 6006c0d8 authored by James Hogan

metag: Atomics, locks and bitops



Add header files to implement Meta hardware thread locks (used by some
other atomic operations), atomics, spinlocks, and bitops.

There are 2 main types of atomic primitives for metag (in addition to
IRQs off on UP):
 - LOCK instructions provide locking between hardware threads.
 - LNKGET/LNKSET instructions provide load-linked/store-conditional
   operations allowing for lighter-weight atomics on Meta2.
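
For illustration (not part of this commit; names are hypothetical), the
LNKGET/LNKSET retry shape corresponds to the following C, written with the
GCC __atomic builtins; the real helpers below use inline asm and check
TXSTAT to see whether the LNKSET took effect:

	static inline void example_atomic_add(int i, int *counter)
	{
		int old, new;

		do {
			/* "load-linked": sample the current value */
			old = __atomic_load_n(counter, __ATOMIC_RELAXED);
			new = old + i;
			/* "store-conditional": fails if another thread got in between */
		} while (!__atomic_compare_exchange_n(counter, &old, new, 1,
						      __ATOMIC_RELAXED,
						      __ATOMIC_RELAXED));
	}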

LOCK instructions allow for hardware threads to acquire voluntary or
exclusive hardware thread locks:
 - LOCK0 releases exclusive and voluntary lock from the running hardware
   thread.
 - LOCK1 acquires the voluntary hardware lock, blocking until it becomes
   available.
 - LOCK2 implies LOCK1, and additionally acquires the exclusive hardware
   lock, blocking all other hardware threads from executing.
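
With CONFIG_METAG_ATOMICITY_LOCK1 the voluntary lock is taken through the
__global_lock1()/__global_unlock1() helpers from <asm/global_lock.h>, so a
read-modify-write such as atomic_add(i, v) in asm/atomic_lock1.h below
reduces to roughly:

	unsigned long flags;

	__global_lock1(flags);		/* grab the voluntary hardware thread lock */
	fence();			/* barrier from asm/barrier.h, issued before the protected store */
	v->counter += i;
	__global_unlock1(flags);	/* drop the lock again */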

Signed-off-by: James Hogan <james.hogan@imgtec.com>
parent 9b802d1f
arch/metag/include/asm/atomic.h  +53 −0
#ifndef __ASM_METAG_ATOMIC_H
#define __ASM_METAG_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/cmpxchg.h>

#if defined(CONFIG_METAG_ATOMICITY_IRQSOFF)
/* The simple UP case. */
#include <asm-generic/atomic.h>
#else

#if defined(CONFIG_METAG_ATOMICITY_LOCK1)
#include <asm/atomic_lock1.h>
#else
#include <asm/atomic_lnkget.h>
#endif

#define atomic_add_negative(a, v)       (atomic_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1, (v))
#define atomic_dec(v) atomic_sub(1, (v))

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif

#define atomic_dec_if_positive(v)       atomic_sub_if_positive(1, v)

#include <asm-generic/atomic64.h>

#endif /* __ASM_METAG_ATOMIC_H */
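
Illustrative usage of the derived helpers above (not part of this commit;
struct my_obj and put_my_obj() are hypothetical):

	#include <linux/atomic.h>
	#include <linux/slab.h>		/* kfree() */

	struct my_obj {
		atomic_t refs;
	};

	static void put_my_obj(struct my_obj *obj)
	{
		/* atomic_dec_and_test() returns true only for the final reference */
		if (atomic_dec_and_test(&obj->refs))
			kfree(obj);
	}
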
arch/metag/include/asm/atomic_lnkget.h  +234 −0
#ifndef __ASM_METAG_ATOMIC_LNKGET_H
#define __ASM_METAG_ATOMIC_LNKGET_H

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_set(v, i)		((v)->counter = (i))

#include <linux/compiler.h>

#include <asm/barrier.h>

/*
 * None of these asm statements clobber memory: LNKSET writes around the
 * cache, so the memory it modifies cannot safely be read by any means
 * other than these accessors anyway.
 */

static inline int atomic_read(const atomic_t *v)
{
	int temp;

	asm volatile (
		"LNKGETD %0, [%1]\n"
		: "=da" (temp)
		: "da" (&v->counter));

	return temp;
}

static inline void atomic_add(int i, atomic_t *v)
{
	int temp;

	asm volatile (
		"1:	LNKGETD %0, [%1]\n"
		"	ADD	%0, %0, %2\n"
		"	LNKSETD [%1], %0\n"
		"	DEFR	%0, TXSTAT\n"
		"	ANDT	%0, %0, #HI(0x3f000000)\n"
		"	CMPT	%0, #HI(0x02000000)\n"
		"	BNZ	1b\n"
		: "=&d" (temp)
		: "da" (&v->counter), "bd" (i)
		: "cc");
}

static inline void atomic_sub(int i, atomic_t *v)
{
	int temp;

	asm volatile (
		"1:	LNKGETD %0, [%1]\n"
		"	SUB	%0, %0, %2\n"
		"	LNKSETD [%1], %0\n"
		"	DEFR	%0, TXSTAT\n"
		"	ANDT	%0, %0, #HI(0x3f000000)\n"
		"	CMPT	%0, #HI(0x02000000)\n"
		"	BNZ 1b\n"
		: "=&d" (temp)
		: "da" (&v->counter), "bd" (i)
		: "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	int result, temp;

	smp_mb();

	asm volatile (
		"1:	LNKGETD %1, [%2]\n"
		"	ADD	%1, %1, %3\n"
		"	LNKSETD [%2], %1\n"
		"	DEFR	%0, TXSTAT\n"
		"	ANDT	%0, %0, #HI(0x3f000000)\n"
		"	CMPT	%0, #HI(0x02000000)\n"
		"	BNZ 1b\n"
		: "=&d" (temp), "=&da" (result)
		: "da" (&v->counter), "bd" (i)
		: "cc");

	smp_mb();

	return result;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int result, temp;

	smp_mb();

	asm volatile (
		"1:	LNKGETD %1, [%2]\n"
		"	SUB	%1, %1, %3\n"
		"	LNKSETD [%2], %1\n"
		"	DEFR	%0, TXSTAT\n"
		"	ANDT	%0, %0, #HI(0x3f000000)\n"
		"	CMPT	%0, #HI(0x02000000)\n"
		"	BNZ	1b\n"
		: "=&d" (temp), "=&da" (result)
		: "da" (&v->counter), "bd" (i)
		: "cc");

	smp_mb();

	return result;
}

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	int temp;

	asm volatile (
		"1:	LNKGETD %0, [%1]\n"
		"	AND	%0, %0, %2\n"
		"	LNKSETD	[%1] %0\n"
		"	DEFR	%0, TXSTAT\n"
		"	ANDT	%0, %0, #HI(0x3f000000)\n"
		"	CMPT	%0, #HI(0x02000000)\n"
		"	BNZ	1b\n"
		: "=&d" (temp)
		: "da" (&v->counter), "bd" (~mask)
		: "cc");
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	int temp;

	asm volatile (
		"1:	LNKGETD %0, [%1]\n"
		"	OR	%0, %0, %2\n"
		"	LNKSETD	[%1], %0\n"
		"	DEFR	%0, TXSTAT\n"
		"	ANDT	%0, %0, #HI(0x3f000000)\n"
		"	CMPT	%0, #HI(0x02000000)\n"
		"	BNZ	1b\n"
		: "=&d" (temp)
		: "da" (&v->counter), "bd" (mask)
		: "cc");
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int result, temp;

	smp_mb();

	asm volatile (
		"1:	LNKGETD	%1, [%2]\n"
		"	CMP	%1, %3\n"
		"	LNKSETDEQ [%2], %4\n"
		"	BNE	2f\n"
		"	DEFR	%0, TXSTAT\n"
		"	ANDT	%0, %0, #HI(0x3f000000)\n"
		"	CMPT	%0, #HI(0x02000000)\n"
		"	BNZ	1b\n"
		"2:\n"
		: "=&d" (temp), "=&d" (result)
		: "da" (&v->counter), "bd" (old), "da" (new)
		: "cc");

	smp_mb();

	return result;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	int temp, old;

	asm volatile (
		"1:	LNKGETD %1, [%2]\n"
		"	LNKSETD	[%2], %3\n"
		"	DEFR	%0, TXSTAT\n"
		"	ANDT	%0, %0, #HI(0x3f000000)\n"
		"	CMPT	%0, #HI(0x02000000)\n"
		"	BNZ	1b\n"
		: "=&d" (temp), "=&d" (old)
		: "da" (&v->counter), "da" (new)
		: "cc");

	return old;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int result, temp;

	smp_mb();

	asm volatile (
		"1:	LNKGETD %1, [%2]\n"
		"	CMP	%1, %3\n"
		"	ADD	%0, %1, %4\n"
		"	LNKSETDNE [%2], %0\n"
		"	BEQ	2f\n"
		"	DEFR	%0, TXSTAT\n"
		"	ANDT	%0, %0, #HI(0x3f000000)\n"
		"	CMPT	%0, #HI(0x02000000)\n"
		"	BNZ	1b\n"
		"2:\n"
		: "=&d" (temp), "=&d" (result)
		: "da" (&v->counter), "bd" (u), "bd" (a)
		: "cc");

	smp_mb();

	return result;
}

static inline int atomic_sub_if_positive(int i, atomic_t *v)
{
	int result, temp;

	asm volatile (
		"1:	LNKGETD %1, [%2]\n"
		"	SUBS	%1, %1, %3\n"
		"	LNKSETDGE [%2], %1\n"
		"	BLT	2f\n"
		"	DEFR	%0, TXSTAT\n"
		"	ANDT	%0, %0, #HI(0x3f000000)\n"
		"	CMPT	%0, #HI(0x02000000)\n"
		"	BNZ	1b\n"
		"2:\n"
		: "=&d" (temp), "=&da" (result)
		: "da" (&v->counter), "bd" (i)
		: "cc");

	return result;
}

#endif /* __ASM_METAG_ATOMIC_LNKGET_H */
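
Illustrative note (not part of this commit): as the comment at the top of
this file says, data updated with LNKSET must also be read back through
these accessors, since a plain cached load of v->counter may not observe
the LNKSET result. A hypothetical caller:

	static int sample_counter(const atomic_t *v)
	{
		/* LNKGETD-based read: observes values written by LNKSET */
		return atomic_read(v);
		/* return v->counter; would bypass the accessors and may read stale data */
	}
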
arch/metag/include/asm/atomic_lock1.h  +160 −0
#ifndef __ASM_METAG_ATOMIC_LOCK1_H
#define __ASM_METAG_ATOMIC_LOCK1_H

#define ATOMIC_INIT(i)	{ (i) }

#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/global_lock.h>

static inline int atomic_read(const atomic_t *v)
{
	return (v)->counter;
}

/*
 * atomic_set needs to take the lock to protect atomic_add_unless from a
 * possible race, as the latter reads the counter twice:
 *
 *  CPU0                               CPU1
 *  atomic_add_unless(1, 0)
 *    ret = v->counter (non-zero)
 *    if (ret != u)                    v->counter = 0
 *      v->counter += 1 (counter set to 1)
 *
 * Making atomic_set take the lock preserves ordering and logical
 * consistency.
 */
static inline int atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	__global_lock1(flags);
	fence();
	v->counter = i;
	__global_unlock1(flags);
	return i;
}

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	__global_lock1(flags);
	fence();
	v->counter += i;
	__global_unlock1(flags);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long flags;

	__global_lock1(flags);
	fence();
	v->counter -= i;
	__global_unlock1(flags);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long result;
	unsigned long flags;

	__global_lock1(flags);
	result = v->counter;
	result += i;
	fence();
	v->counter = result;
	__global_unlock1(flags);

	return result;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long result;
	unsigned long flags;

	__global_lock1(flags);
	result = v->counter;
	result -= i;
	fence();
	v->counter = result;
	__global_unlock1(flags);

	return result;
}

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	__global_lock1(flags);
	fence();
	v->counter &= ~mask;
	__global_unlock1(flags);
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	__global_lock1(flags);
	fence();
	v->counter |= mask;
	__global_unlock1(flags);
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	__global_lock1(flags);
	ret = v->counter;
	if (ret == old) {
		fence();
		v->counter = new;
	}
	__global_unlock1(flags);

	return ret;
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	__global_lock1(flags);
	ret = v->counter;
	if (ret != u) {
		fence();
		v->counter += a;
	}
	__global_unlock1(flags);

	return ret;
}

static inline int atomic_sub_if_positive(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;

	__global_lock1(flags);
	ret = v->counter - i;
	if (ret >= 0) {
		fence();
		v->counter = ret;
	}
	__global_unlock1(flags);

	return ret;
}

#endif /* __ASM_METAG_ATOMIC_LOCK1_H */
arch/metag/include/asm/bitops.h  +132 −0
#ifndef __ASM_METAG_BITOPS_H
#define __ASM_METAG_BITOPS_H

#include <linux/compiler.h>
#include <asm/barrier.h>
#include <asm/global_lock.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

#ifdef CONFIG_SMP
/*
 * These functions are the basis of our bit ops.
 */
static inline void set_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned long mask = 1UL << (bit & 31);

	p += bit >> 5;

	__global_lock1(flags);
	fence();
	*p |= mask;
	__global_unlock1(flags);
}

static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned long mask = 1UL << (bit & 31);

	p += bit >> 5;

	__global_lock1(flags);
	fence();
	*p &= ~mask;
	__global_unlock1(flags);
}

static inline void change_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned long mask = 1UL << (bit & 31);

	p += bit >> 5;

	__global_lock1(flags);
	fence();
	*p ^= mask;
	__global_unlock1(flags);
}

static inline int test_and_set_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned long old;
	unsigned long mask = 1UL << (bit & 31);

	p += bit >> 5;

	__global_lock1(flags);
	old = *p;
	if (!(old & mask)) {
		fence();
		*p = old | mask;
	}
	__global_unlock1(flags);

	return (old & mask) != 0;
}

static inline int test_and_clear_bit(unsigned int bit,
				     volatile unsigned long *p)
{
	unsigned long flags;
	unsigned long old;
	unsigned long mask = 1UL << (bit & 31);

	p += bit >> 5;

	__global_lock1(flags);
	old = *p;
	if (old & mask) {
		fence();
		*p = old & ~mask;
	}
	__global_unlock1(flags);

	return (old & mask) != 0;
}

static inline int test_and_change_bit(unsigned int bit,
				      volatile unsigned long *p)
{
	unsigned long flags;
	unsigned long old;
	unsigned long mask = 1UL << (bit & 31);

	p += bit >> 5;

	__global_lock1(flags);
	fence();
	old = *p;
	*p = old ^ mask;
	__global_unlock1(flags);

	return (old & mask) != 0;
}

#else
#include <asm-generic/bitops/atomic.h>
#endif /* CONFIG_SMP */

#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __ASM_METAG_BITOPS_H */
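
Illustrative usage of the locked bitops above (not part of this commit;
FLAG_INITIALISED, init_done and do_one_time_setup() are hypothetical):

	#include <linux/bitops.h>	/* test_and_set_bit() */

	#define FLAG_INITIALISED	0

	static unsigned long init_done;
	static void do_one_time_setup(void);	/* hypothetical one-shot init */

	static void maybe_setup(void)
	{
		/* test_and_set_bit() is atomic: exactly one caller sees the bit clear */
		if (!test_and_set_bit(FLAG_INITIALISED, &init_done))
			do_one_time_setup();
	}
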
arch/metag/include/asm/cmpxchg.h  +65 −0
#ifndef __ASM_METAG_CMPXCHG_H
#define __ASM_METAG_CMPXCHG_H

#include <asm/barrier.h>

#if defined(CONFIG_METAG_ATOMICITY_IRQSOFF)
#include <asm/cmpxchg_irq.h>
#elif defined(CONFIG_METAG_ATOMICITY_LOCK1)
#include <asm/cmpxchg_lock1.h>
#elif defined(CONFIG_METAG_ATOMICITY_LNKGET)
#include <asm/cmpxchg_lnkget.h>
#endif

extern void __xchg_called_with_bad_pointer(void);

#define __xchg(ptr, x, size)				\
({							\
	unsigned long __xchg__res;			\
	volatile void *__xchg_ptr = (ptr);		\
	switch (size) {					\
	case 4:						\
		__xchg__res = xchg_u32(__xchg_ptr, x);	\
		break;					\
	case 1:						\
		__xchg__res = xchg_u8(__xchg_ptr, x);	\
		break;					\
	default:					\
		__xchg_called_with_bad_pointer();	\
		__xchg__res = x;			\
		break;					\
	}						\
							\
	__xchg__res;					\
})

#define xchg(ptr, x)	\
	((__typeof__(*(ptr)))__xchg((ptr), (unsigned long)(x), sizeof(*(ptr))))

/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr, o, n)						\
	({								\
		__typeof__(*(ptr)) _o_ = (o);				\
		__typeof__(*(ptr)) _n_ = (n);				\
		(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
					       (unsigned long)_n_,	\
					       sizeof(*(ptr)));		\
	})

#endif /* __ASM_METAG_CMPXCHG_H */
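
Illustrative usage of cmpxchg() (not part of this commit; saturating_inc()
is hypothetical, and for an int the call dispatches to __cmpxchg_u32()):

	static void saturating_inc(int *p, int max)
	{
		int old = *p;
		int seen;

		for (;;) {
			if (old >= max)
				return;			/* already at the cap */
			seen = cmpxchg(p, old, old + 1);
			if (seen == old)
				return;			/* our update was installed */
			old = seen;			/* lost the race; retry with the value we saw */
		}
	}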