Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 305b15c7 authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "ARM64: spinlock: Add SEV and dsb(ishst) in unlock code"

parents 446a4c80 59cae187
Loading
Loading
Loading
Loading
+9 −0
Original line number Diff line number Diff line
@@ -529,6 +529,15 @@ config CPU_BIG_ENDIAN
       help
         Say Y if you plan on running a kernel in big-endian mode.

config ARM64_SEV_IN_LOCK_UNLOCK
	bool "Add explicit SEV in the spinlock unlock code path"
	help
	  In certain unexplained cases, the stlr alone might not wake up
	  the processor waiting in WFE on a spinlock.
	  Add an explicit dsb and SEV in write_unlock, read_unlock
	  and spin_unlock to ensure that the core waiting on the lock
	  wakes up from WFE.

config SMP
	bool "Symmetric Multi-Processing"
	help
+12 −0
Original line number Diff line number Diff line
@@ -86,6 +86,10 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(
"	stlrh	%w1, %0\n"
#ifdef CONFIG_ARM64_SEV_IN_LOCK_UNLOCK
"	dsb ishst\n"
"	sev\n"
#endif
	: "=Q" (lock->owner)
	: "r" (lock->owner + 1)
	: "memory");
@@ -154,6 +158,10 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(
	/* Store-release of 0 clears the lock word, publishing all prior writes. */
	"	stlr	%w1, %0\n"
#ifdef CONFIG_ARM64_SEV_IN_LOCK_UNLOCK
	/*
	 * Workaround (see Kconfig help): on some parts the stlr alone has been
	 * observed not to wake a core waiting in WFE.  Force visibility of the
	 * store with a dsb, then issue an explicit SEV to wake the waiter.
	 */
	"	dsb ishst\n"
	"	sev\n"
#endif
	: "=Q" (rw->lock) : "r" (0) : "memory");
}

@@ -197,6 +205,10 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
	"1:	ldxr	%w0, %2\n"
	"	sub	%w0, %w0, #1\n"
	"	stlxr	%w1, %w0, %2\n"
#ifdef CONFIG_ARM64_SEV_IN_LOCK_UNLOCK
	"	dsb ishst\n"
	"	sev\n"
#endif
	"	cbnz	%w1, 1b\n"
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	: