Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit de4a5c63 authored by James Morse, committed by Gerrit (the friendly Code Review server)
Browse files

KVM: arm64: Workaround Cortex-A77 erratum 1542418 on VMID rollover



Cortex-A77's erratum 1542418 workaround needs to be applied on VMID
re-use too. This prevents the CPU from predicting a modified branch
based on a previous user of the VMID and ASID.

KVM doesn't use force_vm_exit or exit_vm_noop for anything other than
vmid rollover. Rename them, and use this to invoke the VMID workaround
on each CPU.

Another case where VMID and ASID may get reused is if the system is
over-provisioned and two vCPUs of the same VMID are scheduled on
one physical CPU. KVM invalidates the TLB to prevent ASID sharing
in this case, invoke the asid-rollover workaround too so we avoid
the ASID sharing tripping the erratum.

Change-Id: Ia7d82cfc785091c546b40a8a54584784a34c3e5a
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Patch-mainline: linux-arm-kernel @ 11/14/19, 14:59
[sramana@codeaurora.org: Resolve trivial merge conflicts]
Signed-off-by: Srinivas Ramana <sramana@codeaurora.org>
parent 3c753a6e
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -366,6 +366,11 @@ static inline int hyp_map_aux_data(void)

#define kvm_phys_to_vttbr(addr)		(addr)

/* No-op stub: Cortex-A77 erratum 1542418 is arm64-only. */
static inline void kvm_workaround_1542418_vmid_rollover(void)
{
	/* 32-bit ARM hosts are not affected; nothing to do. */
}

#endif	/* !__ASSEMBLY__ */

#endif /* __ARM_KVM_MMU_H__ */
+15 −0
Original line number Diff line number Diff line
@@ -20,6 +20,7 @@

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/cpufeature.h>

/*
@@ -528,5 +529,19 @@ static inline int hyp_map_aux_data(void)

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

/*
 * Apply the Cortex-A77 erratum 1542418 workaround ahead of VMID re-use:
 * run the same ASID-rollover sequence, with interrupts masked, so a
 * branch prediction tagged by a previous user of this VMID/ASID cannot
 * be consumed. No-op unless the erratum is configured in and the
 * affected-CPU capability is set.
 */
static inline void kvm_workaround_1542418_vmid_rollover(void)
{
	unsigned long flags;

	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_1542418) &&
	    cpus_have_const_cap(ARM64_WORKAROUND_1542418)) {
		local_irq_save(flags);
		arm64_workaround_1542418_asid_rollover();
		local_irq_restore(flags);
	}
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
+14 −6
Original line number Diff line number Diff line
@@ -384,6 +384,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	 */
	if (*last_ran != vcpu->vcpu_id) {
		kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);

		/*
		 * 'last_ran' and this vcpu may share an ASID and hit the
		 *  conditions for Cortex-A77 erratum 1542418.
		 */
		kvm_workaround_1542418_vmid_rollover();

		*last_ran = vcpu->vcpu_id;
	}

@@ -470,15 +477,16 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
	return vcpu_mode_priv(vcpu);
}

/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
static void exit_vmid_rollover(void *info)
{
	kvm_workaround_1542418_vmid_rollover();
}

void force_vm_exit(const cpumask_t *mask)
static void force_vmid_rollover_exit(const cpumask_t *mask)
{
	preempt_disable();
	smp_call_function_many(mask, exit_vm_noop, NULL, true);
	smp_call_function_many(mask, exit_vmid_rollover, NULL, true);
	kvm_workaround_1542418_vmid_rollover();
	preempt_enable();
}

@@ -536,10 +544,10 @@ static void update_vttbr(struct kvm *kvm)

		/*
		 * On SMP we know no other CPUs can use this CPU's or each
		 * other's VMID after force_vm_exit returns since the
		 * other's VMID after force_vmid_rollover_exit returns since the
		 * kvm_vmid_lock blocks them from reentry to the guest.
		 */
		force_vm_exit(cpu_all_mask);
		force_vmid_rollover_exit(cpu_all_mask);
		/*
		 * Now broadcast TLB + ICACHE invalidation over the inner
		 * shareable domain to make sure all data structures are