Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 527cd207 authored by Linus Torvalds
Browse files

Merge tag 'riscv-for-linus-4.17-mw0' of...

Merge tag 'riscv-for-linus-4.17-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux

Pull RISC-V updates from Palmer Dabbelt:
 "This contains the new features we'd like to incorporate into the
  RISC-V port for 4.17. We might have a bit more stuff land later in the
  merge window, but I wanted to get this out earlier just so everyone
  can see where we currently stand.

  A short summary of the changes is:

   - We've added support for dynamic ftrace on RISC-V targets.

   - There have been a handful of cleanups to our atomic and locking
     routines. They now more closely match the released RISC-V memory
     model draft.

   - Our module loading support has been cleaned up and is now enabled
     by default, despite some limitations still existing.

   - A patch to define COMMANDLINE_FORCE instead of COMMANDLINE_OVERRIDE
     so the generic device tree code picks up handling all our command
     line stuff.

  There's more information in the merge commits for each patch set"

* tag 'riscv-for-linus-4.17-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux: (21 commits)
  RISC-V: Rename CONFIG_CMDLINE_OVERRIDE to CONFIG_CMDLINE_FORCE
  RISC-V: Add definition of relocation types
  RISC-V: Enable module support in defconfig
  RISC-V: Support SUB32 relocation type in kernel module
  RISC-V: Support ADD32 relocation type in kernel module
  RISC-V: Support ALIGN relocation type in kernel module
  RISC-V: Support RVC_BRANCH/JUMP relocation type in kernel module
  RISC-V: Support HI20/LO12_I/LO12_S relocation type in kernel module
  RISC-V: Support CALL relocation type in kernel module
  RISC-V: Support GOT_HI20/CALL_PLT relocation type in kernel module
  RISC-V: Add section of GOT.PLT for kernel module
  RISC-V: Add sections of PLT and GOT for kernel module
  riscv/atomic: Strengthen implementations with fences
  riscv/spinlock: Strengthen implementations with fences
  riscv/barrier: Define __smp_{store_release,load_acquire}
  riscv/ftrace: Add HAVE_FUNCTION_GRAPH_RET_ADDR_PTR support
  riscv/ftrace: Add DYNAMIC_FTRACE_WITH_REGS support
  riscv/ftrace: Add ARCH_SUPPORTS_FTRACE_OPS support
  riscv/ftrace: Add dynamic function graph tracer support
  riscv/ftrace: Add dynamic function tracer support
  ...
parents 23221d99 f6a11d9f
Loading
Loading
Loading
Loading
+10 −2
Original line number Diff line number Diff line
@@ -115,6 +115,9 @@ config ARCH_RV64I
	select 64BIT
	select HAVE_FUNCTION_TRACER
	select HAVE_FUNCTION_GRAPH_TRACER
	select HAVE_FTRACE_MCOUNT_RECORD
	select HAVE_DYNAMIC_FTRACE
	select HAVE_DYNAMIC_FTRACE_WITH_REGS

endchoice

@@ -132,6 +135,10 @@ choice
		bool "medium any code model"
endchoice

config MODULE_SECTIONS
	bool
	select HAVE_MOD_ARCH_SPECIFIC

choice
	prompt "Maximum Physical Memory"
	default MAXPHYSMEM_2GB if 32BIT
@@ -142,6 +149,7 @@ choice
		bool "2GiB"
	config MAXPHYSMEM_128GB
		depends on 64BIT && CMODEL_MEDANY
		select MODULE_SECTIONS if MODULES
		bool "128GiB"
endchoice

@@ -282,7 +290,7 @@ config CMDLINE_BOOL
	  in CONFIG_CMDLINE.

	  The built-in options will be concatenated to the default command
	  line if CMDLINE_OVERRIDE is set to 'N'. Otherwise, the default
	  line if CMDLINE_FORCE is set to 'N'. Otherwise, the default
	  command line will be ignored and replaced by the built-in string.

config CMDLINE
@@ -292,7 +300,7 @@ config CMDLINE
	help
	  Supply command-line options at build time by entering them here.

config CMDLINE_OVERRIDE
config CMDLINE_FORCE
	bool "Built-in command line overrides bootloader arguments"
	depends on CMDLINE_BOOL
	help
+8 −0
Original line number Diff line number Diff line
@@ -11,6 +11,9 @@
LDFLAGS         :=
OBJCOPYFLAGS    := -O binary
LDFLAGS_vmlinux :=
ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
	LDFLAGS_vmlinux := --no-relax
endif
KBUILD_AFLAGS_MODULE += -fPIC
KBUILD_CFLAGS_MODULE += -fPIC

@@ -56,6 +59,11 @@ endif
ifeq ($(CONFIG_CMODEL_MEDANY),y)
	KBUILD_CFLAGS += -mcmodel=medany
endif
ifeq ($(CONFIG_MODULE_SECTIONS),y)
	KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/riscv/kernel/module.lds
endif

KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)

# GCC versions that support the "-mstrict-align" option default to allowing
# unaligned accesses.  While unaligned accesses are explicitly allowed in the
+2 −0
Original line number Diff line number Diff line
@@ -73,3 +73,5 @@ CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
# CONFIG_RCU_TRACE is not set
CONFIG_CRYPTO_USER_API_HASH=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+268 −149
Original line number Diff line number Diff line
@@ -24,6 +24,20 @@
#include <asm/barrier.h>

#define ATOMIC_INIT(i)	{ (i) }

#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);		\
	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory");	\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");	\
	op##_relaxed(args);						\
})

static __always_inline int atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
@@ -51,14 +65,15 @@ static __always_inline void atomic64_set(atomic64_t *v, long i)
 * one version to worry about.
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)		\
static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)	\
static __always_inline							\
void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)		\
{									\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " zero, %1, %0"	\
		: "+A" (v->counter)					\
		: "r" (I)						\
		: "memory");						\
}
}									\

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)					\
@@ -79,75 +94,115 @@ ATOMIC_OPS(xor, xor, i)
#undef ATOMIC_OPS

/*
 * Atomic ops that have ordered, relaxed, acquire, and relese variants.
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There's two flavors of these: the arithmatic ops have both fetch and return
 * versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, asm_type, c_type, prefix)				\
static __always_inline c_type atomic##prefix##_fetch_##op##c_or(c_type i, atomic##prefix##_t *v)	\
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type atomic##prefix##_fetch_##op##_relaxed(c_type i,			\
					     atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}									\
static __always_inline							\
c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"amo" #asm_op "." #asm_type #asm_or " %1, %2, %0"					\
		"	amo" #asm_op "." #asm_type ".aqrl  %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, asm_type, c_type, prefix)			\
static __always_inline c_type atomic##prefix##_##op##_return##c_or(c_type i, atomic##prefix##_t *v)	\
#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type atomic##prefix##_##op##_return_relaxed(c_type i,			\
					      atomic##prefix##_t *v)	\
{									\
        return atomic##prefix##_fetch_##op##c_or(i, v) c_op I;						\
        return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I;	\
}									\
static __always_inline							\
c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v)	\
{									\
        return atomic##prefix##_fetch_##op(i, v) c_op I;		\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)				\
        ATOMIC_FETCH_OP (op, asm_op,       I, asm_or, c_or, w,  int,   )	\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
        ATOMIC_FETCH_OP( op, asm_op,       I, w,  int,   )		\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)				\
        ATOMIC_FETCH_OP (op, asm_op,       I, asm_or, c_or, w,  int,   )	\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )	\
        ATOMIC_FETCH_OP (op, asm_op,       I, asm_or, c_or, d, long, 64)	\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
        ATOMIC_FETCH_OP( op, asm_op,       I, w,  int,   )		\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w,  int,   )		\
        ATOMIC_FETCH_OP( op, asm_op,       I, d, long, 64)		\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, long, 64)
#endif

ATOMIC_OPS(add, add, +,  i,      , _relaxed)
ATOMIC_OPS(add, add, +,  i, .aq  , _acquire)
ATOMIC_OPS(add, add, +,  i, .rl  , _release)
ATOMIC_OPS(add, add, +,  i, .aqrl,         )
ATOMIC_OPS(add, add, +,  i)
ATOMIC_OPS(sub, add, +, -i)

ATOMIC_OPS(sub, add, +, -i,      , _relaxed)
ATOMIC_OPS(sub, add, +, -i, .aq  , _acquire)
ATOMIC_OPS(sub, add, +, -i, .rl  , _release)
ATOMIC_OPS(sub, add, +, -i, .aqrl,         )
#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_add_return		atomic_add_return
#define atomic_sub_return		atomic_sub_return

#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
#define atomic_fetch_add		atomic_fetch_add
#define atomic_fetch_sub		atomic_fetch_sub

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_add_return		atomic64_add_return
#define atomic64_sub_return		atomic64_sub_return

#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
#define atomic64_fetch_add		atomic64_fetch_add
#define atomic64_fetch_sub		atomic64_fetch_sub
#endif

#undef ATOMIC_OPS

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or)				\
        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w,  int,   )
#define ATOMIC_OPS(op, asm_op, I)					\
        ATOMIC_FETCH_OP(op, asm_op, I, w,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or)				\
        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w,  int,   )	\
        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, d, long, 64)
#define ATOMIC_OPS(op, asm_op, I)					\
        ATOMIC_FETCH_OP(op, asm_op, I, w,  int,   )			\
        ATOMIC_FETCH_OP(op, asm_op, I, d, long, 64)
#endif

ATOMIC_OPS(and, and, i,      , _relaxed)
ATOMIC_OPS(and, and, i, .aq  , _acquire)
ATOMIC_OPS(and, and, i, .rl  , _release)
ATOMIC_OPS(and, and, i, .aqrl,         )
ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or,  or, i)
ATOMIC_OPS(xor, xor, i)

ATOMIC_OPS( or,  or, i,      , _relaxed)
ATOMIC_OPS( or,  or, i, .aq  , _acquire)
ATOMIC_OPS( or,  or, i, .rl  , _release)
ATOMIC_OPS( or,  or, i, .aqrl,         )
#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
#define atomic_fetch_and		atomic_fetch_and
#define atomic_fetch_or			atomic_fetch_or
#define atomic_fetch_xor		atomic_fetch_xor

ATOMIC_OPS(xor, xor, i,      , _relaxed)
ATOMIC_OPS(xor, xor, i, .aq  , _acquire)
ATOMIC_OPS(xor, xor, i, .rl  , _release)
ATOMIC_OPS(xor, xor, i, .aqrl,         )
#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
#define atomic64_fetch_and		atomic64_fetch_and
#define atomic64_fetch_or		atomic64_fetch_or
#define atomic64_fetch_xor		atomic64_fetch_xor
#endif

#undef ATOMIC_OPS

@@ -157,11 +212,13 @@ ATOMIC_OPS(xor, xor, i, .aqrl, )
/*
 * The extra atomic operations that are constructed from one of the core
 * AMO-based operations above (aside from sub, which is easier to fit above).
 * These are required to perform a barrier, but they're OK this way because
 * atomic_*_return is also required to perform a barrier.
 * These are required to perform a full barrier, but they're OK this way
 * because atomic_*_return is also required to perform a full barrier.
 *
 */
#define ATOMIC_OP(op, func_op, comp_op, I, c_type, prefix)		\
static __always_inline bool atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
static __always_inline							\
bool atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)		\
{									\
	return atomic##prefix##_##func_op##_return(i, v) comp_op I;	\
}
@@ -183,19 +240,32 @@ ATOMIC_OPS(add_negative, add, <, 0)
#undef ATOMIC_OPS

#define ATOMIC_OP(op, func_op, I, c_type, prefix)			\
static __always_inline void atomic##prefix##_##op(atomic##prefix##_t *v)	\
static __always_inline							\
void atomic##prefix##_##op(atomic##prefix##_t *v)			\
{									\
	atomic##prefix##_##func_op(I, v);				\
}

#define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix)			\
static __always_inline c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v)	\
static __always_inline							\
c_type atomic##prefix##_fetch_##op##_relaxed(atomic##prefix##_t *v)	\
{									\
	return atomic##prefix##_fetch_##func_op##_relaxed(I, v);	\
}									\
static __always_inline							\
c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v)		\
{									\
	return atomic##prefix##_fetch_##func_op(I, v);			\
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, c_type, prefix)		\
static __always_inline c_type atomic##prefix##_##op##_return(atomic##prefix##_t *v)	\
static __always_inline							\
c_type atomic##prefix##_##op##_return_relaxed(atomic##prefix##_t *v)	\
{									\
        return atomic##prefix##_fetch_##op##_relaxed(v) c_op I;		\
}									\
static __always_inline							\
c_type atomic##prefix##_##op##_return(atomic##prefix##_t *v)		\
{									\
        return atomic##prefix##_fetch_##op(v) c_op I;			\
}
@@ -218,13 +288,36 @@ static __always_inline c_type atomic##prefix##_##op##_return(atomic##prefix##_t
ATOMIC_OPS(inc, add, +,  1)
ATOMIC_OPS(dec, add, +, -1)

#define atomic_inc_return_relaxed	atomic_inc_return_relaxed
#define atomic_dec_return_relaxed	atomic_dec_return_relaxed
#define atomic_inc_return		atomic_inc_return
#define atomic_dec_return		atomic_dec_return

#define atomic_fetch_inc_relaxed	atomic_fetch_inc_relaxed
#define atomic_fetch_dec_relaxed	atomic_fetch_dec_relaxed
#define atomic_fetch_inc		atomic_fetch_inc
#define atomic_fetch_dec		atomic_fetch_dec

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_inc_return_relaxed	atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed	atomic64_dec_return_relaxed
#define atomic64_inc_return		atomic64_inc_return
#define atomic64_dec_return		atomic64_dec_return

#define atomic64_fetch_inc_relaxed	atomic64_fetch_inc_relaxed
#define atomic64_fetch_dec_relaxed	atomic64_fetch_dec_relaxed
#define atomic64_fetch_inc		atomic64_fetch_inc
#define atomic64_fetch_dec		atomic64_fetch_dec
#endif

#undef ATOMIC_OPS
#undef ATOMIC_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

#define ATOMIC_OP(op, func_op, comp_op, I, prefix)			\
static __always_inline bool atomic##prefix##_##op(atomic##prefix##_t *v)	\
static __always_inline							\
bool atomic##prefix##_##op(atomic##prefix##_t *v)			\
{									\
	return atomic##prefix##_##func_op##_return(v) comp_op I;	\
}
@@ -238,19 +331,19 @@ ATOMIC_OP(dec_and_test, dec, ==, 0, 64)

#undef ATOMIC_OP

/* This is required to provide a barrier on success. */
/* This is required to provide a full barrier on success. */
static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
       int prev, rc;

	__asm__ __volatile__ (
		"0:\n\t"
		"lr.w.aqrl  %[p],  %[c]\n\t"
		"beq        %[p],  %[u], 1f\n\t"
		"add       %[rc],  %[p], %[a]\n\t"
		"sc.w.aqrl %[rc], %[rc], %[c]\n\t"
		"bnez      %[rc], 0b\n\t"
		"1:"
		"0:	lr.w     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
@@ -263,13 +356,13 @@ static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u)
       long prev, rc;

	__asm__ __volatile__ (
		"0:\n\t"
		"lr.d.aqrl  %[p],  %[c]\n\t"
		"beq        %[p],  %[u], 1f\n\t"
		"add       %[rc],  %[p], %[a]\n\t"
		"sc.d.aqrl %[rc], %[rc], %[c]\n\t"
		"bnez      %[rc], 0b\n\t"
		"1:"
		"0:	lr.d     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
@@ -300,37 +393,63 @@ static __always_inline long atomic64_inc_not_zero(atomic64_t *v)

/*
 * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
 * {cmp,}xchg and the operations that return, so they need a barrier.
 */
/*
 * FIXME: atomic_cmpxchg_{acquire,release,relaxed} are all implemented by
 * assigning the same barrier to both the LR and SC operations, but that might
 * not make any sense.  We're waiting on a memory model specification to
 * determine exactly what the right thing to do is here.
 * {cmp,}xchg and the operations that return, so they need a full barrier.
 */
#define ATOMIC_OP(c_t, prefix, c_or, size, asm_or)						\
static __always_inline c_t atomic##prefix##_cmpxchg##c_or(atomic##prefix##_t *v, c_t o, c_t n) 	\
#define ATOMIC_OP(c_t, prefix, size)					\
static __always_inline							\
c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_relaxed(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_acquire(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_release(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n)			\
{									\
	return __xchg(&(v->counter), n, size);				\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_relaxed(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_acquire(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg(&(v->counter), o, n, size, asm_or, asm_or);				\
	return __cmpxchg_release(&(v->counter), o, n, size);		\
}									\
static __always_inline c_t atomic##prefix##_xchg##c_or(atomic##prefix##_t *v, c_t n) 		\
static __always_inline							\
c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n)	\
{									\
	return __xchg(n, &(v->counter), size, asm_or);						\
	return __cmpxchg(&(v->counter), o, n, size);			\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(c_or, asm_or)			\
	ATOMIC_OP( int,   , c_or, 4, asm_or)
#define ATOMIC_OPS()							\
	ATOMIC_OP( int,   , 4)
#else
#define ATOMIC_OPS(c_or, asm_or)			\
	ATOMIC_OP( int,   , c_or, 4, asm_or)		\
	ATOMIC_OP(long, 64, c_or, 8, asm_or)
#define ATOMIC_OPS()							\
	ATOMIC_OP( int,   , 4)						\
	ATOMIC_OP(long, 64, 8)
#endif

ATOMIC_OPS(        , .aqrl)
ATOMIC_OPS(_acquire,   .aq)
ATOMIC_OPS(_release,   .rl)
ATOMIC_OPS(_relaxed,      )
ATOMIC_OPS()

#undef ATOMIC_OPS
#undef ATOMIC_OP
@@ -340,13 +459,13 @@ static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
       int prev, rc;

	__asm__ __volatile__ (
		"0:\n\t"
		"lr.w.aqrl  %[p],  %[c]\n\t"
		"sub       %[rc],  %[p], %[o]\n\t"
		"bltz      %[rc],    1f\n\t"
		"sc.w.aqrl %[rc], %[rc], %[c]\n\t"
		"bnez      %[rc],    0b\n\t"
		"1:"
		"0:	lr.w     %[p],  %[c]\n"
		"	sub      %[rc], %[p], %[o]\n"
		"	bltz     %[rc], 1f\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
@@ -361,13 +480,13 @@ static __always_inline long atomic64_sub_if_positive(atomic64_t *v, int offset)
       long prev, rc;

	__asm__ __volatile__ (
		"0:\n\t"
		"lr.d.aqrl  %[p],  %[c]\n\t"
		"sub       %[rc],  %[p], %[o]\n\t"
		"bltz      %[rc],    1f\n\t"
		"sc.d.aqrl %[rc], %[rc], %[c]\n\t"
		"bnez      %[rc],    0b\n\t"
		"1:"
		"0:	lr.d     %[p],  %[c]\n"
		"	sub      %[rc], %[p], %[o]\n"
		"	bltz     %[rc], 1f\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
+15 −0
Original line number Diff line number Diff line
@@ -38,6 +38,21 @@
#define __smp_rmb()	RISCV_FENCE(r,r)
#define __smp_wmb()	RISCV_FENCE(w,w)

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	RISCV_FENCE(rw,w);						\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	RISCV_FENCE(r,rw);						\
	___p1;								\
})

/*
 * This is a very specific barrier: it's currently only used in two places in
 * the kernel, both in the scheduler.  See include/linux/spinlock.h for the two
Loading