Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e1ba1c99 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'riscv-for-linus-4.15-rc2_cleanups' of...

Merge tag 'riscv-for-linus-4.15-rc2_cleanups' of git://git.kernel.org/pub/scm/linux/kernel/git/palmer/linux

Pull RISC-V cleanups and ABI fixes from Palmer Dabbelt:
 "This contains a handful of small cleanups that are a result of
  feedback that didn't make it into our original patch set, either
  because the feedback hadn't been given yet, I missed the original
  emails, or we weren't ready to submit the changes yet.

  I've been maintaining the various cleanup patch sets I have as their
  own branches, which I then merged together and signed. Each merge
  commit has a short summary of the changes, and each branch is based on
  your latest tag (4.15-rc1, in this case). If this isn't the right way
  to do this then feel free to suggest something else, but it seems sane
  to me.

  Here's a short summary of the changes, roughly in order of how
  interesting they are.

   - libgcc.h has been moved from include/lib, where it's the only
     member, to include/linux. This is meant to avoid tab completion
     conflicts.

   - VDSO entries for clock_get/gettimeofday/getcpu have been added.
     These are simple syscalls now, but we want to let glibc use them
     from the start so we can make them faster later.

   - A VDSO entry for instruction cache flushing has been added so
     userspace can flush the instruction cache.

   - The VDSO symbol versions for __vdso_cmpxchg{32,64} have been
     removed, as those VDSO entries don't actually exist.

   - __io_writes has been corrected to respect the given type.

   - A new READ_ONCE in arch_spin_is_locked().

   - __test_and_op_bit_ord() is now actually ordered.

   - Various small fixes throughout the tree to enable allmodconfig to
     build cleanly.

   - Removal of some dead code in our atomic support headers.

   - Improvements to various comments in our atomic support headers"

* tag 'riscv-for-linus-4.15-rc2_cleanups' of git://git.kernel.org/pub/scm/linux/kernel/git/palmer/linux: (23 commits)
  RISC-V: __io_writes should respect the length argument
  move libgcc.h to include/linux
  RISC-V: Clean up an unused include
  RISC-V: Allow userspace to flush the instruction cache
  RISC-V: Flush I$ when making a dirty page executable
  RISC-V: Add missing include
  RISC-V: Use define for get_cycles like other architectures
  RISC-V: Provide stub of setup_profiling_timer()
  RISC-V: Export some expected symbols for modules
  RISC-V: move empty_zero_page definition to C and export it
  RISC-V: io.h: type fixes for warnings
  RISC-V: use RISCV_{INT,SHORT} instead of {INT,SHORT} for asm macros
  RISC-V: use generic serial.h
  RISC-V: remove spin_unlock_wait()
  RISC-V: `sfence.vma` orders the instruction cache
  RISC-V: Add READ_ONCE in arch_spin_is_locked()
  RISC-V: __test_and_op_bit_ord should be strongly ordered
  RISC-V: Remove smb_mb__{before,after}_spinlock()
  RISC-V: Remove __smp_bp__{before,after}_atomic
  RISC-V: Comment on why {,cmp}xchg is ordered how it is
  ...
parents 4b1967c9 3b62de26
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -40,6 +40,7 @@ generic-y += resource.h
generic-y += scatterlist.h
generic-y += sections.h
generic-y += sembuf.h
generic-y += serial.h
generic-y += setup.h
generic-y += shmbuf.h
generic-y += shmparam.h
+6 −6
Original line number Diff line number Diff line
@@ -58,17 +58,17 @@
#endif

#if (__SIZEOF_INT__ == 4)
#define INT		__ASM_STR(.word)
#define SZINT		__ASM_STR(4)
#define LGINT		__ASM_STR(2)
#define RISCV_INT		__ASM_STR(.word)
#define RISCV_SZINT		__ASM_STR(4)
#define RISCV_LGINT		__ASM_STR(2)
#else
#error "Unexpected __SIZEOF_INT__"
#endif

#if (__SIZEOF_SHORT__ == 2)
#define SHORT		__ASM_STR(.half)
#define SZSHORT		__ASM_STR(2)
#define LGSHORT		__ASM_STR(1)
#define RISCV_SHORT		__ASM_STR(.half)
#define RISCV_SZSHORT		__ASM_STR(2)
#define RISCV_LGSHORT		__ASM_STR(1)
#else
#error "Unexpected __SIZEOF_SHORT__"
#endif
+54 −49
Original line number Diff line number Diff line
@@ -50,7 +50,7 @@ static __always_inline void atomic64_set(atomic64_t *v, long i)
 * have the AQ or RL bits set.  These don't return anything, so there's only
 * one version to worry about.
 */
#define ATOMIC_OP(op, asm_op, c_op, I, asm_type, c_type, prefix)				\
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)				\
static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)	\
{											\
	__asm__ __volatile__ (								\
@@ -61,19 +61,19 @@ static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I)			\
        ATOMIC_OP (op, asm_op, c_op, I, w,  int,   )
#define ATOMIC_OPS(op, asm_op, I)			\
        ATOMIC_OP (op, asm_op, I, w,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I)			\
        ATOMIC_OP (op, asm_op, c_op, I, w,  int,   )	\
        ATOMIC_OP (op, asm_op, c_op, I, d, long, 64)
#define ATOMIC_OPS(op, asm_op, I)			\
        ATOMIC_OP (op, asm_op, I, w,  int,   )	\
        ATOMIC_OP (op, asm_op, I, d, long, 64)
#endif

ATOMIC_OPS(add, add, +,  i)
ATOMIC_OPS(sub, add, +, -i)
ATOMIC_OPS(and, and, &,  i)
ATOMIC_OPS( or,  or, |,  i)
ATOMIC_OPS(xor, xor, ^,  i)
ATOMIC_OPS(add, add,  i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and,  i)
ATOMIC_OPS( or,  or,  i)
ATOMIC_OPS(xor, xor,  i)

#undef ATOMIC_OP
#undef ATOMIC_OPS
@@ -83,7 +83,7 @@ ATOMIC_OPS(xor, xor, ^, i)
 * There's two flavors of these: the arithmatic ops have both fetch and return
 * versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, asm_type, c_type, prefix)			\
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, asm_type, c_type, prefix)				\
static __always_inline c_type atomic##prefix##_fetch_##op##c_or(c_type i, atomic##prefix##_t *v)	\
{													\
	register c_type ret;										\
@@ -103,13 +103,13 @@ static __always_inline c_type atomic##prefix##_##op##_return##c_or(c_type i, ato

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)				\
        ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w,  int,   )	\
        ATOMIC_FETCH_OP (op, asm_op,       I, asm_or, c_or, w,  int,   )	\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)				\
        ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w,  int,   )	\
        ATOMIC_FETCH_OP (op, asm_op,       I, asm_or, c_or, w,  int,   )	\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )	\
        ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, d, long, 64)	\
        ATOMIC_FETCH_OP (op, asm_op,       I, asm_or, c_or, d, long, 64)	\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
#endif

@@ -126,28 +126,28 @@ ATOMIC_OPS(sub, add, +, -i, .aqrl, )
#undef ATOMIC_OPS

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)				\
        ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )
#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or)				\
        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)				\
        ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w,  int,   )		\
        ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or)				\
        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w,  int,   )	\
        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, d, long, 64)
#endif

ATOMIC_OPS(and, and, &,  i,      , _relaxed)
ATOMIC_OPS(and, and, &,  i, .aq  , _acquire)
ATOMIC_OPS(and, and, &,  i, .rl  , _release)
ATOMIC_OPS(and, and, &,  i, .aqrl,         )
ATOMIC_OPS(and, and, i,      , _relaxed)
ATOMIC_OPS(and, and, i, .aq  , _acquire)
ATOMIC_OPS(and, and, i, .rl  , _release)
ATOMIC_OPS(and, and, i, .aqrl,         )

ATOMIC_OPS( or,  or, |,  i,      , _relaxed)
ATOMIC_OPS( or,  or, |,  i, .aq  , _acquire)
ATOMIC_OPS( or,  or, |,  i, .rl  , _release)
ATOMIC_OPS( or,  or, |,  i, .aqrl,         )
ATOMIC_OPS( or,  or, i,      , _relaxed)
ATOMIC_OPS( or,  or, i, .aq  , _acquire)
ATOMIC_OPS( or,  or, i, .rl  , _release)
ATOMIC_OPS( or,  or, i, .aqrl,         )

ATOMIC_OPS(xor, xor, ^,  i,      , _relaxed)
ATOMIC_OPS(xor, xor, ^,  i, .aq  , _acquire)
ATOMIC_OPS(xor, xor, ^,  i, .rl  , _release)
ATOMIC_OPS(xor, xor, ^,  i, .aqrl,         )
ATOMIC_OPS(xor, xor, i,      , _relaxed)
ATOMIC_OPS(xor, xor, i, .aq  , _acquire)
ATOMIC_OPS(xor, xor, i, .rl  , _release)
ATOMIC_OPS(xor, xor, i, .aqrl,         )

#undef ATOMIC_OPS

@@ -182,13 +182,13 @@ ATOMIC_OPS(add_negative, add, <, 0)
#undef ATOMIC_OP
#undef ATOMIC_OPS

#define ATOMIC_OP(op, func_op, c_op, I, c_type, prefix)				\
#define ATOMIC_OP(op, func_op, I, c_type, prefix)				\
static __always_inline void atomic##prefix##_##op(atomic##prefix##_t *v)	\
{										\
	atomic##prefix##_##func_op(I, v);					\
}

#define ATOMIC_FETCH_OP(op, func_op, c_op, I, c_type, prefix)				\
#define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix)					\
static __always_inline c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v)	\
{											\
	return atomic##prefix##_fetch_##func_op(I, v);					\
@@ -202,16 +202,16 @@ static __always_inline c_type atomic##prefix##_##op##_return(atomic##prefix##_t

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I)						\
        ATOMIC_OP       (op, asm_op, c_op, I,  int,   )				\
        ATOMIC_FETCH_OP (op, asm_op, c_op, I,  int,   )				\
        ATOMIC_OP       (op, asm_op,       I,  int,   )				\
        ATOMIC_FETCH_OP (op, asm_op,       I,  int,   )				\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I)						\
        ATOMIC_OP       (op, asm_op, c_op, I,  int,   )				\
        ATOMIC_FETCH_OP (op, asm_op, c_op, I,  int,   )				\
        ATOMIC_OP       (op, asm_op,       I,  int,   )				\
        ATOMIC_FETCH_OP (op, asm_op,       I,  int,   )				\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   )				\
        ATOMIC_OP       (op, asm_op, c_op, I, long, 64)				\
        ATOMIC_FETCH_OP (op, asm_op, c_op, I, long, 64)				\
        ATOMIC_OP       (op, asm_op,       I, long, 64)				\
        ATOMIC_FETCH_OP (op, asm_op,       I, long, 64)				\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
#endif

@@ -300,8 +300,13 @@ static __always_inline long atomic64_inc_not_zero(atomic64_t *v)

/*
 * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
 * {cmp,}xchg and the operations that return, so they need a barrier.  We just
 * use the other implementations directly.
 * {cmp,}xchg and the operations that return, so they need a barrier.
 */
/*
 * FIXME: atomic_cmpxchg_{acquire,release,relaxed} are all implemented by
 * assigning the same barrier to both the LR and SC operations, but that might
 * not make any sense.  We're waiting on a memory model specification to
 * determine exactly what the right thing to do is here.
 */
#define ATOMIC_OP(c_t, prefix, c_or, size, asm_or)						\
static __always_inline c_t atomic##prefix##_cmpxchg##c_or(atomic##prefix##_t *v, c_t o, c_t n) 	\
+0 −23
Original line number Diff line number Diff line
@@ -38,29 +38,6 @@
#define smp_rmb()	RISCV_FENCE(r,r)
#define smp_wmb()	RISCV_FENCE(w,w)

/*
 * These fences exist to enforce ordering around the relaxed AMOs.  The
 * documentation defines that
 * "
 *     atomic_fetch_add();
 *   is equivalent to:
 *     smp_mb__before_atomic();
 *     atomic_fetch_add_relaxed();
 *     smp_mb__after_atomic();
 * "
 * So we emit full fences on both sides.
 */
#define __smb_mb__before_atomic()	smp_mb()
#define __smb_mb__after_atomic()	smp_mb()

/*
 * These barriers prevent accesses performed outside a spinlock from being moved
 * inside a spinlock.  Since RISC-V sets the aq/rl bits on our spinlock only
 * enforce release consistency, we need full fences here.
 */
#define smb_mb__before_spinlock()	smp_mb()
#define smb_mb__after_spinlock()	smp_mb()

#include <asm-generic/barrier.h>

#endif /* __ASSEMBLY__ */
+1 −1
Original line number Diff line number Diff line
@@ -67,7 +67,7 @@
		: "memory");

#define __test_and_op_bit(op, mod, nr, addr) 			\
	__test_and_op_bit_ord(op, mod, nr, addr, )
	__test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
#define __op_bit(op, mod, nr, addr)				\
	__op_bit_ord(op, mod, nr, addr, )

Loading