
Commit 07fd256d authored by Will Deacon, committed by Greg Kroah-Hartman

arm64: vdso: Avoid ISB after reading from cntvct_el0



commit 77ec462536a13d4b428a1eead725c4818a49f0b1 upstream.

We can avoid the expensive ISB instruction after reading the counter in
the vDSO gettime functions by creating a fake address hazard against a
dummy stack read, just like we do inside the kernel.
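
For reference, the "fake address hazard" works by routing the freshly read counter value into the address calculation of a dummy load: the load cannot complete until the counter value is known, so subsequent memory barriers order the counter read as if it were an ordinary load. An annotated sketch of the macro this patch moves into barrier.h follows (the comments are editorial, not part of the patch):

#define arch_counter_enforce_ordering(val) do {			\
	u64 tmp, _val = (val);						\
									\
	asm volatile(							\
	/* tmp = val ^ val: always zero, yet data-dependent on val */	\
	"	eor	%0, %1, %1\n"					\
	/* tmp = sp + 0: a valid stack address derived from val */	\
	"	add	%0, sp, %0\n"					\
	/* dummy load to xzr: result discarded, but the load must */	\
	/* wait for its address and therefore for the counter read */	\
	"	ldr	xzr, [%0]"					\
	: "=r" (tmp) : "r" (_val));					\
} while (0)

Because the destination is xzr, nothing is architecturally written; the instruction exists purely to create the address dependency, which is typically far cheaper than an ISB that waits for the whole pipeline to drain.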

Signed-off-by: Will Deacon <will@kernel.org>
Reviewed-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Link: https://lore.kernel.org/r/20210318170738.7756-5-will@kernel.org


Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Chanho Park <chanho61.park@samsung.com>
parent 90e498ef
arch/arm64/include/asm/arch_timer.h +0 −21
@@ -165,25 +165,6 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
 	isb();
 }
 
-/*
- * Ensure that reads of the counter are treated the same as memory reads
- * for the purposes of ordering by subsequent memory barriers.
- *
- * This insanity brought to you by speculative system register reads,
- * out-of-order memory accesses, sequence locks and Thomas Gleixner.
- *
- * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
- */
-#define arch_counter_enforce_ordering(val) do {				\
-	u64 tmp, _val = (val);						\
-									\
-	asm volatile(							\
-	"	eor	%0, %1, %1\n"					\
-	"	add	%0, sp, %0\n"					\
-	"	ldr	xzr, [%0]"					\
-	: "=r" (tmp) : "r" (_val));					\
-} while (0)
-
 static __always_inline u64 __arch_counter_get_cntpct_stable(void)
 {
 	u64 cnt;
@@ -224,8 +205,6 @@ static __always_inline u64 __arch_counter_get_cntvct(void)
 	return cnt;
 }
 
-#undef arch_counter_enforce_ordering
-
 static inline int arch_timer_arch_init(void)
 {
 	return 0;
arch/arm64/include/asm/barrier.h +19 −0
@@ -57,6 +57,25 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx,
 	return mask;
 }
 
+/*
+ * Ensure that reads of the counter are treated the same as memory reads
+ * for the purposes of ordering by subsequent memory barriers.
+ *
+ * This insanity brought to you by speculative system register reads,
+ * out-of-order memory accesses, sequence locks and Thomas Gleixner.
+ *
+ * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
+ */
+#define arch_counter_enforce_ordering(val) do {				\
+	u64 tmp, _val = (val);						\
+									\
+	asm volatile(							\
+	"	eor	%0, %1, %1\n"					\
+	"	add	%0, sp, %0\n"					\
+	"	ldr	xzr, [%0]"					\
+	: "=r" (tmp) : "r" (_val));					\
+} while (0)
+
 #define __smp_mb()	dmb(ish)
 #define __smp_rmb()	dmb(ishld)
 #define __smp_wmb()	dmb(ishst)
arch/arm64/include/asm/vdso/gettimeofday.h +1 −5
@@ -85,11 +85,7 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
 	 */
 	isb();
 	asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
-	/*
-	 * This isb() is required to prevent that the seq lock is
-	 * speculated.#
-	 */
-	isb();
+	arch_counter_enforce_ordering(res);
 
 	return res;
 }
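
Putting the pieces together, the vDSO counter read after this patch reduces to roughly the following (reconstructed from the hunk above; the clock-mode check earlier in the function is elided, and the comments are editorial):

static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
{
	u64 res;

	/* ... clock_mode check elided ... */

	/* The remaining isb() keeps the counter read itself from being
	 * speculated ahead of the preceding seqcount load. */
	isb();
	asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
	/* Dummy address hazard instead of a second isb(): later memory
	 * barriers now order the counter read like an ordinary load. */
	arch_counter_enforce_ordering(res);

	return res;
}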