Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f7c34874 authored by Max Filippov
Browse files

xtensa: add exclusive atomics support



Implement atomic primitives using exclusive access opcodes available in
the recent xtensa cores.
Since l32ex/s32ex don't provide any memory-ordering guarantees, don't define
__smp_mb__before_atomic/__smp_mb__after_atomic, so that the generic fallbacks
emit a full memw barrier around atomics.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
parent d065fcf1
Loading
Loading
Loading
Loading
+61 −1
Original line number Diff line number Diff line
@@ -56,7 +56,67 @@
 */
#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))

#if XCHAL_HAVE_S32C1I
#if XCHAL_HAVE_EXCLUSIVE
/*
 * ATOMIC_OP(op) expands to atomic_<op>(i, v): atomically apply <op>
 * (add, sub, and, or, ...) with operand i to v->counter, discarding
 * the result.
 *
 * Retry loop using the exclusive-access opcodes: l32ex loads the
 * counter and arms the exclusive monitor, <op> computes the new value,
 * s32ex attempts the conditional store, getex pulls the store-success
 * flag into %0, and beqz retries the whole sequence until the store
 * succeeds.  No memory ordering is implied -- per the commit message,
 * l32ex/s32ex have no barrier semantics of their own.
 */
#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32ex   %1, %3\n"			\
			"       " #op " %0, %1, %2\n"			\
			"       s32ex   %0, %3\n"			\
			"       getex   %0\n"				\
			"       beqz    %0, 1b\n"			\
			: "=&a" (result), "=&a" (tmp)			\
			: "a" (i), "a" (v)				\
			: "memory"					\
			);						\
}									\

/*
 * ATOMIC_OP_RETURN(op) expands to atomic_<op>_return(i, v): atomically
 * apply <op> to v->counter and return the NEW value.
 *
 * Same l32ex/s32ex/getex retry loop as ATOMIC_OP.  The trailing,
 * seemingly redundant "#op %0, %1, %2" after the loop is deliberate:
 * getex overwrote %0 (result) with the store-success flag, so the new
 * value must be recomputed from the old value still held in %1 (tmp)
 * before it can be returned.
 */
#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32ex   %1, %3\n"			\
			"       " #op " %0, %1, %2\n"			\
			"       s32ex   %0, %3\n"			\
			"       getex   %0\n"				\
			"       beqz    %0, 1b\n"			\
			"       " #op " %0, %1, %2\n"			\
			: "=&a" (result), "=&a" (tmp)			\
			: "a" (i), "a" (v)				\
			: "memory"					\
			);						\
									\
	return result;							\
}

/*
 * ATOMIC_FETCH_OP(op) expands to atomic_fetch_<op>(i, v): atomically
 * apply <op> to v->counter and return the OLD value.
 *
 * Same exclusive-access retry loop as ATOMIC_OP.  On exit, %1 (tmp)
 * still holds the value loaded by l32ex on the final (successful)
 * iteration, i.e. the pre-operation value, which is what is returned.
 */
#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32ex   %1, %3\n"			\
			"       " #op " %0, %1, %2\n"			\
			"       s32ex   %0, %3\n"			\
			"       getex   %0\n"				\
			"       beqz    %0, 1b\n"			\
			: "=&a" (result), "=&a" (tmp)			\
			: "a" (i), "a" (v)				\
			: "memory"					\
			);						\
									\
	return tmp;							\
}

#elif XCHAL_HAVE_S32C1I
#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t * v)			\
{									\
+4 −0
Original line number Diff line number Diff line
@@ -9,12 +9,16 @@
#ifndef _XTENSA_SYSTEM_H
#define _XTENSA_SYSTEM_H

#include <asm/core.h>

#define mb()  ({ __asm__ __volatile__("memw" : : : "memory"); })
#define rmb() barrier()
#define wmb() mb()

#if XCHAL_HAVE_S32C1I
#define __smp_mb__before_atomic()		barrier()
#define __smp_mb__after_atomic()		barrier()
#endif

#include <asm-generic/barrier.h>

+120 −1
Original line number Diff line number Diff line
@@ -96,7 +96,126 @@ static inline unsigned long __fls(unsigned long word)

#include <asm-generic/bitops/fls64.h>

#if XCHAL_HAVE_S32C1I
#if XCHAL_HAVE_EXCLUSIVE

/*
 * set_bit - atomically set a bit in a bitmap.
 * @bit: bit number; word index is bit / 32, bit within word is bit % 32
 * @p:   base address of the bitmap
 *
 * Exclusive-access retry loop: l32ex loads the word, "or" sets the
 * target bit, s32ex attempts the conditional store; getex fetches the
 * store-success flag and beqz retries until the store wins.
 */
static inline void set_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long tmp;
	unsigned long mask = 1UL << (bit & 31);

	/* Advance to the 32-bit word containing the bit. */
	p += bit >> 5;

	__asm__ __volatile__(
			"1:     l32ex   %0, %2\n"
			"       or      %0, %0, %1\n"
			"       s32ex   %0, %2\n"
			"       getex   %0\n"
			"       beqz    %0, 1b\n"
			: "=&a" (tmp)
			: "a" (mask), "a" (p)
			: "memory");
}

/*
 * clear_bit - atomically clear a bit in a bitmap.
 * @bit: bit number; word index is bit / 32, bit within word is bit % 32
 * @p:   base address of the bitmap
 *
 * Same exclusive-access retry loop as set_bit, but "and" with the
 * inverted mask (~mask, passed as the input operand) clears only the
 * target bit.
 */
static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long tmp;
	unsigned long mask = 1UL << (bit & 31);

	/* Advance to the 32-bit word containing the bit. */
	p += bit >> 5;

	__asm__ __volatile__(
			"1:     l32ex   %0, %2\n"
			"       and     %0, %0, %1\n"
			"       s32ex   %0, %2\n"
			"       getex   %0\n"
			"       beqz    %0, 1b\n"
			: "=&a" (tmp)
			: "a" (~mask), "a" (p)
			: "memory");
}

/*
 * change_bit - atomically toggle a bit in a bitmap.
 * @bit: bit number; word index is bit / 32, bit within word is bit % 32
 * @p:   base address of the bitmap
 *
 * Exclusive-access retry loop: l32ex loads the word, "xor" flips only
 * the target bit, s32ex attempts the conditional store; getex fetches
 * the store-success flag and beqz retries until the store wins.
 *
 * Fix: the xor operand must be 'mask', not '~mask'.  Xor-ing with
 * ~mask would invert every bit EXCEPT the target one.  This also makes
 * the function consistent with test_and_change_bit, which passes
 * plain 'mask'.
 */
static inline void change_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long tmp;
	unsigned long mask = 1UL << (bit & 31);

	/* Advance to the 32-bit word containing the bit. */
	p += bit >> 5;

	__asm__ __volatile__(
			"1:     l32ex   %0, %2\n"
			"       xor     %0, %0, %1\n"
			"       s32ex   %0, %2\n"
			"       getex   %0\n"
			"       beqz    %0, 1b\n"
			: "=&a" (tmp)
			: "a" (mask), "a" (p)
			: "memory");
}

/*
 * test_and_set_bit - atomically set a bit and return its old state.
 * @bit: bit number; word index is bit / 32, bit within word is bit % 32
 * @p:   base address of the bitmap
 *
 * Returns nonzero if the bit was already set, zero otherwise.
 *
 * l32ex loads the old word into %1 (value); the new word (old | mask)
 * is built in %0 (tmp) and conditionally stored by s32ex; getex/beqz
 * retry on contention.  'value' keeps the pre-store word from the
 * successful iteration, so (value & mask) reports the old bit state.
 */
static inline int
test_and_set_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long tmp, value;
	unsigned long mask = 1UL << (bit & 31);

	/* Advance to the 32-bit word containing the bit. */
	p += bit >> 5;

	__asm__ __volatile__(
			"1:     l32ex   %1, %3\n"
			"       or      %0, %1, %2\n"
			"       s32ex   %0, %3\n"
			"       getex   %0\n"
			"       beqz    %0, 1b\n"
			: "=&a" (tmp), "=&a" (value)
			: "a" (mask), "a" (p)
			: "memory");

	return value & mask;
}

/*
 * test_and_clear_bit - atomically clear a bit and return its old state.
 * @bit: bit number; word index is bit / 32, bit within word is bit % 32
 * @p:   base address of the bitmap
 *
 * Returns nonzero if the bit was set before clearing, zero otherwise.
 *
 * Same retry loop as test_and_set_bit, but "and" with ~mask clears the
 * target bit; 'value' holds the pre-store word, so (value & mask)
 * reports the old bit state.
 */
static inline int
test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long tmp, value;
	unsigned long mask = 1UL << (bit & 31);

	/* Advance to the 32-bit word containing the bit. */
	p += bit >> 5;

	__asm__ __volatile__(
			"1:     l32ex   %1, %3\n"
			"       and     %0, %1, %2\n"
			"       s32ex   %0, %3\n"
			"       getex   %0\n"
			"       beqz    %0, 1b\n"
			: "=&a" (tmp), "=&a" (value)
			: "a" (~mask), "a" (p)
			: "memory");

	return value & mask;
}

/*
 * test_and_change_bit - atomically toggle a bit and return its old state.
 * @bit: bit number; word index is bit / 32, bit within word is bit % 32
 * @p:   base address of the bitmap
 *
 * Returns nonzero if the bit was set before toggling, zero otherwise.
 *
 * Same retry loop as test_and_set_bit, but "xor" with mask flips the
 * target bit; 'value' holds the pre-store word, so (value & mask)
 * reports the old bit state.
 */
static inline int
test_and_change_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long tmp, value;
	unsigned long mask = 1UL << (bit & 31);

	/* Advance to the 32-bit word containing the bit. */
	p += bit >> 5;

	__asm__ __volatile__(
			"1:     l32ex   %1, %3\n"
			"       xor     %0, %1, %2\n"
			"       s32ex   %0, %3\n"
			"       getex   %0\n"
			"       beqz    %0, 1b\n"
			: "=&a" (tmp), "=&a" (value)
			: "a" (mask), "a" (p)
			: "memory");

	return value & mask;
}

#elif XCHAL_HAVE_S32C1I

static inline void set_bit(unsigned int bit, volatile unsigned long *p)
{
+34 −2
Original line number Diff line number Diff line
@@ -23,7 +23,24 @@
static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
#if XCHAL_HAVE_S32C1I
#if XCHAL_HAVE_EXCLUSIVE
	unsigned long tmp, result;

	__asm__ __volatile__(
			"1:     l32ex   %0, %3\n"
			"       bne     %0, %4, 2f\n"
			"       mov     %1, %2\n"
			"       s32ex   %1, %3\n"
			"       getex   %1\n"
			"       beqz    %1, 1b\n"
			"2:\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (new), "a" (p), "a" (old)
			: "memory"
			);

	return result;
#elif XCHAL_HAVE_S32C1I
	__asm__ __volatile__(
			"       wsr     %2, scompare1\n"
			"       s32c1i  %0, %1, 0\n"
@@ -108,7 +125,22 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,

static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
{
#if XCHAL_HAVE_S32C1I
#if XCHAL_HAVE_EXCLUSIVE
	unsigned long tmp, result;

	__asm__ __volatile__(
			"1:     l32ex   %0, %3\n"
			"       mov     %1, %2\n"
			"       s32ex   %1, %3\n"
			"       getex   %1\n"
			"       beqz    %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (val), "a" (m)
			: "memory"
			);

	return result;
#elif XCHAL_HAVE_S32C1I
	unsigned long tmp, result;
	__asm__ __volatile__(
			"1:     l32i    %1, %2, 0\n"
+4 −0
Original line number Diff line number Diff line
@@ -6,6 +6,10 @@

#include <variant/core.h>

/*
 * Older variant headers (<variant/core.h>) predate the exclusive-access
 * opcodes and do not define XCHAL_HAVE_EXCLUSIVE; default it to 0 so
 * the #if XCHAL_HAVE_EXCLUSIVE guards elsewhere compile cleanly.
 */
#ifndef XCHAL_HAVE_EXCLUSIVE
#define XCHAL_HAVE_EXCLUSIVE 0
#endif

/* Same fallback for variants that don't report a cache spanning way. */
#ifndef XCHAL_SPANNING_WAY
#define XCHAL_SPANNING_WAY 0
#endif
Loading