Documentation/features/sched/membarrier-sync-core/arch-support.txt  +4 −4

@@ -5,10 +5,10 @@
 #
 # Architecture requirements
 #
-# * arm64
+# * arm/arm64
 #
-# Rely on eret context synchronization when returning from IPI handler, and
-# when returning to user-space.
+# Rely on implicit context synchronization as a result of exception return
+# when returning from IPI handler, and when returning to user-space.
 #
 # * x86
 #
@@ -31,7 +31,7 @@
     -----------------------
     |       alpha: | TODO |
     |         arc: | TODO |
-    |         arm: | TODO |
+    |         arm: |  ok  |
     |       arm64: |  ok  |
     |         c6x: | TODO |
     |       h8300: | TODO |
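The sync-core flavour of membarrier() matters to code that rewrites executable pages at runtime, e.g. JIT engines: after new code is written, every thread must pass through a core-serializing instruction before executing it, and on arm/arm64 the exception return back to user-space provides exactly that. Below is a minimal user-space sketch of the call sequence, assuming a kernel with membarrier sync-core support (the command constants come from <linux/membarrier.h>; glibc ships no wrapper, hence the raw syscall; error handling is elided):

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

static int membarrier(int cmd, unsigned int flags)
{
	return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
	/* Register once per process before using the expedited command. */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, 0))
		return 1;

	/* ... a JIT would write new code to an executable mapping here ... */

	/*
	 * Issue a core-serializing barrier on all threads of this process.
	 * On arm/arm64 the kernel relies on the context synchronization
	 * implied by exception return, as the documentation above states.
	 */
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0))
		return 1;

	puts("sync-core membarrier completed");
	return 0;
}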
arch/arm/Kconfig  +1 −0

@@ -9,6 +9,7 @@ config ARM
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_KCOV
+	select ARCH_HAS_MEMBARRIER_SYNC_CORE
 	select ARCH_HAS_PTE_SPECIAL if ARM_LPAE
 	select ARCH_HAS_PHYS_TO_DMA
 	select ARCH_HAS_SET_MEMORY
arch/arm/Makefile  +2 −2

@@ -46,12 +46,12 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
 KBUILD_CPPFLAGS	+= -mbig-endian
 CHECKFLAGS	+= -D__ARMEB__
 AS		+= -EB
-LD		+= -EB
+LDFLAGS		+= -EB
 else
 KBUILD_CPPFLAGS	+= -mlittle-endian
 CHECKFLAGS	+= -D__ARMEL__
 AS		+= -EL
-LD		+= -EL
+LDFLAGS		+= -EL
 endif

 #
arch/arm/include/asm/assembler.h  +4 −0

@@ -460,6 +460,10 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 	adds	\tmp, \addr, #\size - 1
 	sbcccs	\tmp, \tmp, \limit
 	bcs	\bad
+#ifdef CONFIG_CPU_SPECTRE
+	movcs	\addr, #0
+	csdb
+#endif
 #endif
 	.endm
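For readers less fluent in ARM assembly: when the bounds check fails (carry set), the new movcs forces the user address to NULL, and csdb is a speculation barrier, so a mispredicted path cannot dereference the unsanitized pointer. A rough C analogue of this branchless clamp, modelled on the kernel's generic array_index_mask_nospec() pattern (the helper names here are illustrative, and an arithmetic right shift of signed values is assumed, as the kernel itself assumes):

#include <stdio.h>

/*
 * Return ~0UL when idx < size, 0 otherwise, without a conditional
 * branch, so the result holds even under speculative execution.
 * Assumes idx and size are both below LONG_MAX.
 */
static inline unsigned long index_mask(unsigned long idx, unsigned long size)
{
	return ~(long)(idx | (size - 1 - idx)) >> (sizeof(long) * 8 - 1);
}

static inline unsigned long clamp_index(unsigned long idx, unsigned long size)
{
	/* Out-of-range indices collapse to 0, like "movcs \addr, #0". */
	return idx & index_mask(idx, size);
}

int main(void)
{
	unsigned long size = 16;

	printf("%lu %lu\n", clamp_index(5, size), clamp_index(42, size)); /* 5 0 */
	return 0;
}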
arch/arm/include/asm/bitops.h  +9 −83

@@ -215,7 +215,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 #if __LINUX_ARM_ARCH__ < 5

-#include <asm-generic/bitops/ffz.h>
 #include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/__ffs.h>
 #include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/ffs.h>

@@ -223,93 +222,20 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);

 #else

-static inline int constant_fls(int x)
-{
-	int r = 32;
-
-	if (!x)
-		return 0;
-	if (!(x & 0xffff0000u)) {
-		x <<= 16;
-		r -= 16;
-	}
-	if (!(x & 0xff000000u)) {
-		x <<= 8;
-		r -= 8;
-	}
-	if (!(x & 0xf0000000u)) {
-		x <<= 4;
-		r -= 4;
-	}
-	if (!(x & 0xc0000000u)) {
-		x <<= 2;
-		r -= 2;
-	}
-	if (!(x & 0x80000000u)) {
-		x <<= 1;
-		r -= 1;
-	}
-	return r;
-}
-
-/*
- * On ARMv5 and above those functions can be implemented around the
- * clz instruction for much better code efficiency.  __clz returns
- * the number of leading zeros, zero input will return 32, and
- * 0x80000000 will return 0.
- */
-static inline unsigned int __clz(unsigned int x)
-{
-	unsigned int ret;
-
-	asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
-
-	return ret;
-}
-
-/*
- * fls() returns zero if the input is zero, otherwise returns the bit
- * position of the last set bit, where the LSB is 1 and MSB is 32.
- */
-static inline int fls(int x)
-{
-	if (__builtin_constant_p(x))
-		return constant_fls(x);
-
-	return 32 - __clz(x);
-}
-
-/*
- * __fls() returns the bit position of the last bit set, where the
- * LSB is 0 and MSB is 31.  Zero input is undefined.
- */
-static inline unsigned long __fls(unsigned long x)
-{
-	return fls(x) - 1;
-}
-
-/*
- * ffs() returns zero if the input was zero, otherwise returns the bit
- * position of the first set bit, where the LSB is 1 and MSB is 32.
- */
-static inline int ffs(int x)
-{
-	return fls(x & -x);
-}
-
-/*
- * __ffs() returns the bit position of the first bit set, where the
- * LSB is 0 and MSB is 31.  Zero input is undefined.
- */
-static inline unsigned long __ffs(unsigned long x)
-{
-	return ffs(x) - 1;
-}
-
-#define ffz(x) __ffs( ~(x) )
+/*
+ * On ARMv5 and above, the gcc built-ins may rely on the clz instruction
+ * and produce optimal inlined code in all cases. On ARMv7 it is even
+ * better by also using the rbit instruction.
+ */
+#include <asm-generic/bitops/builtin-__fls.h>
+#include <asm-generic/bitops/builtin-__ffs.h>
+#include <asm-generic/bitops/builtin-fls.h>
+#include <asm-generic/bitops/builtin-ffs.h>

 #endif

+#include <asm-generic/bitops/ffz.h>
 #include <asm-generic/bitops/fls64.h>

 #include <asm-generic/bitops/sched.h>
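The dropped helpers are behaviourally identical to the asm-generic builtin-*.h wrappers that replace them: on ARMv5+ the compiler expands __builtin_clz() to the same clz instruction the hand-written __clz() emitted. A stand-alone sketch of those semantics (the k-prefixed names are hypothetical, chosen to avoid colliding with the libc ffs()):

#include <stdio.h>

/* fls(): last set bit, LSB = 1, MSB = 32; fls(0) == 0. */
static inline int kfls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* ffs(): first set bit, same 1-based numbering; ffs(0) == 0. */
static inline int kffs(int x)
{
	return __builtin_ffs(x);
}

/* __fls(): last set bit counted from 0; zero input is undefined. */
static inline unsigned long k__fls(unsigned long x)
{
	return sizeof(long) * 8 - 1 - __builtin_clzl(x);
}

/* __ffs(): first set bit counted from 0; zero input is undefined. */
static inline unsigned long k__ffs(unsigned long x)
{
	return __builtin_ctzl(x);
}

int main(void)
{
	printf("fls(0x80000000)   = %d\n", kfls(0x80000000u));    /* 32 */
	printf("ffs(0x8)          = %d\n", kffs(0x8));            /* 4 */
	printf("__fls(0x80000000) = %lu\n", k__fls(0x80000000UL)); /* 31 */
	printf("__ffs(0x8)        = %lu\n", k__ffs(0x8));          /* 3 */
	return 0;
}

Compiled for ARMv7 (e.g. -march=armv7-a), each helper should reduce to one or two instructions such as clz or rbit, which is the point of deferring to the built-ins.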