Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bd82d4bd authored by Julien Thierry's avatar Julien Thierry Committed by Catalin Marinas
Browse files

arm64: Fix incorrect irqflag restore for priority masking

When using IRQ priority masking to disable interrupts, in order to deal
with the PSR.I state, local_irq_save() would convert the I bit into a
PMR value (GIC_PRIO_IRQOFF). This resulted in local_irq_restore()
potentially modifying the value of PMR in undesired location due to the
state of PSR.I upon flag saving [1].

In an attempt to solve this issue in a less hackish manner, introduce
a bit (GIC_PRIO_PSR_I_SET) for the PMR values that can represent
whether PSR.I is being used to disable interrupts, in which case it
takes precedence over the status of interrupt masking via PMR.

GIC_PRIO_PSR_I_SET is chosen such that (<pmr_value> |
GIC_PRIO_PSR_I_SET) does not mask more interrupts than <pmr_value> as
some sections (e.g. arch_cpu_idle(), interrupt acknowledge path)
require PMR not to mask interrupts that could be signaled to the
CPU when using only PSR.I.

[1] https://www.spinics.net/lists/arm-kernel/msg716956.html



Fixes: 4a503217 ("arm64: irqflags: Use ICC_PMR_EL1 for interrupt masking")
Cc: <stable@vger.kernel.org> # 5.1.x-
Reported-by: default avatarZenghui Yu <yuzenghui@huawei.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Wei Li <liwei391@huawei.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Christoffer Dall <christoffer.dall@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Suzuki K Pouloze <suzuki.poulose@arm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: default avatarMarc Zyngier <marc.zyngier@arm.com>
Signed-off-by: default avatarJulien Thierry <julien.thierry@arm.com>
Signed-off-by: default avatarCatalin Marinas <catalin.marinas@arm.com>
parent 17ce302f
Loading
Loading
Loading
Loading
+3 −1
Original line number Diff line number Diff line
@@ -163,7 +163,9 @@ static inline bool gic_prio_masking_enabled(void)

static inline void gic_pmr_mask_irqs(void)
{
	BUILD_BUG_ON(GICD_INT_DEF_PRI <= GIC_PRIO_IRQOFF);
	BUILD_BUG_ON(GICD_INT_DEF_PRI < (GIC_PRIO_IRQOFF |
					 GIC_PRIO_PSR_I_SET));
	BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON);
	gic_write_pmr(GIC_PRIO_IRQOFF);
}

+41 −27
Original line number Diff line number Diff line
@@ -18,6 +18,7 @@

#include <linux/irqflags.h>

#include <asm/arch_gicv3.h>
#include <asm/cpufeature.h>

#define DAIF_PROCCTX		0
@@ -32,6 +33,11 @@ static inline void local_daif_mask(void)
		:
		:
		: "memory");

	/* Don't really care for a dsb here, we don't intend to enable IRQs */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	trace_hardirqs_off();
}

@@ -43,7 +49,7 @@ static inline unsigned long local_daif_save(void)

	if (system_uses_irq_prio_masking()) {
		/* If IRQs are masked with PMR, reflect it in the flags */
		if (read_sysreg_s(SYS_ICC_PMR_EL1) <= GIC_PRIO_IRQOFF)
		if (read_sysreg_s(SYS_ICC_PMR_EL1) != GIC_PRIO_IRQON)
			flags |= PSR_I_BIT;
	}

@@ -59,15 +65,24 @@ static inline void local_daif_restore(unsigned long flags)
	if (!irq_disabled) {
		trace_hardirqs_on();

		if (system_uses_irq_prio_masking())
			arch_local_irq_enable();
	} else if (!(flags & PSR_A_BIT)) {
		if (system_uses_irq_prio_masking()) {
			gic_write_pmr(GIC_PRIO_IRQON);
			dsb(sy);
		}
	} else if (system_uses_irq_prio_masking()) {
		u64 pmr;

		if (!(flags & PSR_A_BIT)) {
			/*
			 * If interrupts are disabled but we can take
			 * asynchronous errors, we can take NMIs
			 */
		if (system_uses_irq_prio_masking()) {
			flags &= ~PSR_I_BIT;
			pmr = GIC_PRIO_IRQOFF;
		} else {
			pmr = GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET;
		}

		/*
		 * There has been concern that the write to daif
		 * might be reordered before this write to PMR.
@@ -87,8 +102,7 @@ static inline void local_daif_restore(unsigned long flags)
		 *
		 * So we don't need additional synchronization here.
		 */
			arch_local_irq_disable();
		}
		gic_write_pmr(pmr);
	}

	write_sysreg(flags, daif);
+27 −40
Original line number Diff line number Diff line
@@ -67,42 +67,45 @@ static inline void arch_local_irq_disable(void)
 */
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long daif_bits;
	unsigned long flags;

	daif_bits = read_sysreg(daif);

	/*
	 * The asm is logically equivalent to:
	 *
	 * if (system_uses_irq_prio_masking())
	 *	flags = (daif_bits & PSR_I_BIT) ?
	 *			GIC_PRIO_IRQOFF :
	 *			read_sysreg_s(SYS_ICC_PMR_EL1);
	 * else
	 *	flags = daif_bits;
	 */
	asm volatile(ALTERNATIVE(
			"mov	%0, %1\n"
			"nop\n"
			"nop",
			__mrs_s("%0", SYS_ICC_PMR_EL1)
			"ands	%1, %1, " __stringify(PSR_I_BIT) "\n"
			"csel	%0, %0, %2, eq",
		"mrs	%0, daif",
		__mrs_s("%0", SYS_ICC_PMR_EL1),
		ARM64_HAS_IRQ_PRIO_MASKING)
		: "=&r" (flags), "+r" (daif_bits)
		: "r" ((unsigned long) GIC_PRIO_IRQOFF)
		: "cc", "memory");
		: "=&r" (flags)
		:
		: "memory");

	return flags;
}

static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	int res;

	asm volatile(ALTERNATIVE(
		"and	%w0, %w1, #" __stringify(PSR_I_BIT),
		"eor	%w0, %w1, #" __stringify(GIC_PRIO_IRQON),
		ARM64_HAS_IRQ_PRIO_MASKING)
		: "=&r" (res)
		: "r" ((int) flags)
		: "memory");

	return res;
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	flags = arch_local_save_flags();

	/*
	 * There are too many states with IRQs disabled, just keep the current
	 * state if interrupts are already disabled/masked.
	 */
	if (!arch_irqs_disabled_flags(flags))
		arch_local_irq_disable();

	return flags;
@@ -124,21 +127,5 @@ static inline void arch_local_irq_restore(unsigned long flags)
		: "memory");
}

static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	int res;

	asm volatile(ALTERNATIVE(
			"and	%w0, %w1, #" __stringify(PSR_I_BIT) "\n"
			"nop",
			"cmp	%w1, #" __stringify(GIC_PRIO_IRQOFF) "\n"
			"cset	%w0, ls",
			ARM64_HAS_IRQ_PRIO_MASKING)
		: "=&r" (res)
		: "r" ((int) flags)
		: "cc", "memory");

	return res;
}
#endif
#endif
+4 −3
Original line number Diff line number Diff line
@@ -608,12 +608,13 @@ static inline void kvm_arm_vhe_guest_enter(void)
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 *
	 * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
	 * dsb to ensure the redistributor forwards EL2 IRQs to the CPU.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON);
	if (system_uses_irq_prio_masking())
		dsb(sy);
}
}

static inline void kvm_arm_vhe_guest_exit(void)
{
+8 −2
Original line number Diff line number Diff line
@@ -35,9 +35,15 @@
 * means masking more IRQs (or at least that the same IRQs remain masked).
 *
 * To mask interrupts, we clear the most significant bit of PMR.
 *
 * Some code sections either automatically switch back to PSR.I or explicitly
 * require to not use priority masking. If bit GIC_PRIO_PSR_I_SET is included
 * in the priority mask, it indicates that PSR.I should be set and
 * interrupt disabling temporarily does not rely on IRQ priorities.
 */
#define GIC_PRIO_IRQON		0xf0
#define GIC_PRIO_IRQON			0xc0
#define GIC_PRIO_IRQOFF			(GIC_PRIO_IRQON & ~0x80)
#define GIC_PRIO_PSR_I_SET		(1 << 4)

/* Additional SPSR bits not exposed in the UABI */
#define PSR_IL_BIT		(1 << 20)
Loading