Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 59cae187 authored by Rohit Vaswani's avatar Rohit Vaswani Committed by Trilok Soni
Browse files

ARM64: spinlock: Add SEV and dsb(ishst) in unlock code



In certain unexplained cases, the stlr alone might not wake up
the processor waiting in WFE on a spinlock.

Add an explicit dsb(ishst) and SEV in write_unlock, read_unlock
and spin_unlock to ensure that the core waiting on the lock
wakes up from WFE.

ISHST variant of the DSB should be fine here since this would
be applicable for the inner shareable domain only with the
store instruction before it.

Selectable by config option ARM64_SEV_IN_LOCK_UNLOCK and it
is disabled by default.

CRs-Fixed: 962923
Change-Id: I691ff5713d4d564623b75b053b40d1f46d74868a
Signed-off-by: default avatarRohit Vaswani <rvaswani@codeaurora.org>
Signed-off-by: default avatarTrilok Soni <tsoni@codeaurora.org>
parent b7ca2f4c
Loading
Loading
Loading
Loading
+9 −0
Original line number Diff line number Diff line
@@ -529,6 +529,15 @@ config CPU_BIG_ENDIAN
       help
         Say Y if you plan on running a kernel in big-endian mode.

config ARM64_SEV_IN_LOCK_UNLOCK
	bool "Add explicit SEV in the spinlock unlock code path"
	help
	  In certain unexplained cases, the stlr alone might not wake up
	  the processor waiting in WFE on a spinlock.
	  Add an explicit dsb(ishst) and SEV in write_unlock, read_unlock
	  and spin_unlock to ensure that the core waiting on the lock
	  wakes up from WFE.

config SMP
	bool "Symmetric Multi-Processing"
	help
+12 −0
Original line number Diff line number Diff line
@@ -86,6 +86,10 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/*
	 * Unlock by storing owner + 1 with release semantics (stlrh) so
	 * that all writes inside the critical section are visible before
	 * the lock is observed free by the next waiter.
	 */
	asm volatile(
"	stlrh	%w1, %0\n"
#ifdef CONFIG_ARM64_SEV_IN_LOCK_UNLOCK
	/*
	 * Workaround (see commit message): on some parts the store-release
	 * alone may not wake a core sitting in WFE on this lock, so order
	 * the store with dsb ishst (inner-shareable, store-only) and then
	 * explicitly send a wake-up event with sev.
	 */
"	dsb ishst\n"
"	sev\n"
#endif
	: "=Q" (lock->owner)
	: "r" (lock->owner + 1)
	: "memory");
@@ -154,6 +158,10 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	/*
	 * Release the write lock: store-release (stlr) of 0 clears the
	 * lock word while ordering all prior critical-section writes.
	 */
	asm volatile(
	"	stlr	%w1, %0\n"
#ifdef CONFIG_ARM64_SEV_IN_LOCK_UNLOCK
	/*
	 * Workaround (see commit message): dsb ishst + sev explicitly wake
	 * cores waiting in WFE in case the store-release alone does not.
	 */
	"	dsb ishst\n"
	"	sev\n"
#endif
	: "=Q" (rw->lock) : "r" (0) : "memory");
}

@@ -197,6 +205,10 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
	"1:	ldxr	%w0, %2\n"
	"	sub	%w0, %w0, #1\n"
	"	stlxr	%w1, %w0, %2\n"
#ifdef CONFIG_ARM64_SEV_IN_LOCK_UNLOCK
	"	dsb ishst\n"
	"	sev\n"
#endif
	"	cbnz	%w1, 1b\n"
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	: