Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 49c07968 authored by Stepan Moskovchenko, committed by Matt Wagantall
Browse files

ARM64: Insert barriers before Store-Release operations



Some early samples of MSMTHULIUM require that a barrier be
inserted prior to any Store-Release (STLR) operation, for
compliance with ARM memory ordering requirements. Create a
Kconfig option to enable such behavior.

Change-Id: Icc4f38e973b8dd2393f8ec8f1f3ac548630dfc7c
Signed-off-by: Stepan Moskovchenko <stepanm@codeaurora.org>
[abhimany: rename thulium to MSM8996]
Signed-off-by: Abhimanyu Kapur <abhimany@codeaurora.org>
parent 5d683034
Loading
Loading
Loading
Loading
+11 −0
Original line number Diff line number Diff line
@@ -295,6 +295,17 @@ config CPU_BIG_ENDIAN
       help
         Say Y if you plan on running a kernel in big-endian mode.

config ARM64_STLR_NEEDS_BARRIER
	bool "Store-Release operations require explicit barriers"
	default y if ARCH_MSM8996
	help
	  Some early samples of MSM8996 SoCs require that an explicit barrier
	  be executed prior to any Store-Release operation (STLR) to conform to
	  ARM memory ordering requirements. If you are building the kernel to
	  work on one of these early designs, select 'Y' here.

	  For production kernels, you should say 'N' here.

config SMP
	bool "Symmetric Multi-Processing"
	help
+19 −0
Original line number Diff line number Diff line
@@ -58,6 +58,24 @@ do { \
#define smp_rmb()	dmb(ishld)
#define smp_wmb()	dmb(ishst)

#ifdef CONFIG_ARM64_STLR_NEEDS_BARRIER
/*
 * Erratum workaround: early MSM8996 samples require an explicit barrier
 * before any Store-Release, so each STLR is preceded by "dmb nsh".
 * Only 32-bit (case 4) and 64-bit (case 8) objects are handled; other
 * sizes fall through the switch and store nothing.
 * NOTE(review): "dmb nsh" orders only within the non-shareable domain --
 * presumably sufficient for this specific erratum; confirm against the
 * SoC errata documentation.
 */
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	switch (sizeof(*p)) {						\
	case 4:								\
		asm volatile ("dmb nsh\n"				\
			      "stlr %w1, %0"				\
				: "=Q" (*p) : "r" (v) : "memory");	\
		break;							\
	case 8:								\
		asm volatile ("dmb nsh\n"				\
			      "stlr %1, %0"				\
				: "=Q" (*p) : "r" (v) : "memory");	\
		break;							\
	}								\
} while (0)
#else
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
@@ -72,6 +90,7 @@ do { \
		break;							\
	}								\
} while (0)
#endif

#define smp_load_acquire(p)						\
({									\
+6 −0
Original line number Diff line number Diff line
@@ -86,6 +86,9 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(
#ifdef CONFIG_ARM64_STLR_NEEDS_BARRIER
"	dmb nsh\n"
#endif
"	stlrh	%w1, %0\n"
	: "=Q" (lock->owner)
	: "r" (lock->owner + 1)
@@ -154,6 +157,9 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
/*
 * Release a write lock by store-releasing 0 to rw->lock.
 * When CONFIG_ARM64_STLR_NEEDS_BARRIER is set (early MSM8996 samples),
 * an explicit "dmb nsh" is emitted before the STLR as an erratum
 * workaround for ARM memory-ordering compliance.
 */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(
#ifdef CONFIG_ARM64_STLR_NEEDS_BARRIER
"	dmb nsh\n"
#endif
	"	stlr	%w1, %0\n"
	: "=Q" (rw->lock) : "r" (0) : "memory");
}