diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -316,6 +316,16 @@ config ARM64_A57_ERRATA_832075
 	  c) Following the branch instruction, there are six or more loads to
 	     device memory locations
 
+config ARM64_SEV_IN_LOCK_UNLOCK
+	bool "Add explicit SEV in the spinlock unlock code path"
+	default ARCH_MSM8994
+	help
+	  In certain unexplained cases, the stlr alone might not wake up the
+	  processor waiting in WFE on a spinlock.  Add an explicit dsb and
+	  SEV in write_unlock, read_unlock and spin_unlock to ensure that
+	  the core waiting on the lock wakes up from WFE.
+
 config SMP
 	bool "Symmetric Multi-Processing"
 	help
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -86,6 +86,10 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(
 "	stlrh	%w1, %0\n"
+#ifdef CONFIG_ARM64_SEV_IN_LOCK_UNLOCK
+"	dsb	sy\n"
+"	sev\n"
+#endif
 	: "=Q" (lock->owner)
 	: "r" (lock->owner + 1)
 	: "memory");
@@ -155,6 +159,10 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	asm volatile(
 "	stlr	%w1, %0\n"
+#ifdef CONFIG_ARM64_SEV_IN_LOCK_UNLOCK
+"	dsb	sy\n"
+"	sev\n"
+#endif
 	: "=Q" (rw->lock) : "r" (0) : "memory");
 }
@@ -198,6 +206,10 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 "1:	ldxr	%w0, %2\n"
 "	sub	%w0, %w0, #1\n"
 "	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b\n"
+#ifdef CONFIG_ARM64_SEV_IN_LOCK_UNLOCK
+"	dsb	sy\n"
+"	sev\n"
+#endif
 	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 	:
 	: "memory");
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -316,6 +316,16 @@ config ARM64_A57_ERRATA_832075
 	  c) Following the branch instruction, there are six or more loads to
 	     device memory locations
 
+config ARM64_SEV_IN_LOCK_UNLOCK
+	bool "Add explicit SEV in the spinlock unlock code path"
+	default ARCH_MSM8994
+	help
+	  In certain unexplained cases, the stlr alone might not wake up the
+	  processor waiting in WFE on a spinlock.  Add an explicit dsb and
+	  SEV in write_unlock, read_unlock and spin_unlock to ensure that
+	  the core waiting on the lock wakes up from WFE.
+
 config SMP
 	bool "Symmetric Multi-Processing"
 	help
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -86,6 +86,10 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(
 "	stlrh	%w1, %0\n"
+#ifdef CONFIG_ARM64_SEV_IN_LOCK_UNLOCK
+"	dsb	sy\n"
+"	sev\n"
+#endif
 	: "=Q" (lock->owner)
 	: "r" (lock->owner + 1)
 	: "memory");
@@ -155,6 +159,10 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	asm volatile(
 "	stlr	%w1, %0\n"
+#ifdef CONFIG_ARM64_SEV_IN_LOCK_UNLOCK
+"	dsb	sy\n"
+"	sev\n"
+#endif
 	: "=Q" (rw->lock) : "r" (0) : "memory");
 }
@@ -198,6 +206,10 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 "1:	ldxr	%w0, %2\n"
 "	sub	%w0, %w0, #1\n"
 "	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b\n"
+#ifdef CONFIG_ARM64_SEV_IN_LOCK_UNLOCK
+"	dsb	sy\n"
+"	sev\n"
+#endif
 	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 	:
 	: "memory");