Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 472faffa authored by Sean Christopherson, committed by Radim Krčmář
Browse files

KVM: x86: Default to not allowing emulation retry in kvm_mmu_page_fault



Effectively force kvm_mmu_page_fault() to opt-in to allowing retry to
make it more obvious when and why it allows emulation to be retried.
Previously this approach was less convenient due to retry and
re-execute behavior being controlled by separate flags that were also
inverted in their implementations (opt-in versus opt-out).

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: stable@vger.kernel.org
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
parent 384bf221
Loading
Loading
Loading
Loading
+12 −6
Original line number Original line Diff line number Diff line
@@ -5217,7 +5217,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
		       void *insn, int insn_len)
		       void *insn, int insn_len)
{
{
	int r, emulation_type = EMULTYPE_ALLOW_RETRY;
	int r, emulation_type = 0;
	enum emulation_result er;
	enum emulation_result er;
	bool direct = vcpu->arch.mmu.direct_map;
	bool direct = vcpu->arch.mmu.direct_map;


@@ -5230,11 +5230,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
	r = RET_PF_INVALID;
	r = RET_PF_INVALID;
	if (unlikely(error_code & PFERR_RSVD_MASK)) {
	if (unlikely(error_code & PFERR_RSVD_MASK)) {
		r = handle_mmio_page_fault(vcpu, cr2, direct);
		r = handle_mmio_page_fault(vcpu, cr2, direct);
		if (r == RET_PF_EMULATE) {
		if (r == RET_PF_EMULATE)
			emulation_type = 0;
			goto emulate;
			goto emulate;
	}
	}
	}


	if (r == RET_PF_INVALID) {
	if (r == RET_PF_INVALID) {
		r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
		r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
@@ -5260,8 +5258,16 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
		return 1;
		return 1;
	}
	}


	if (mmio_info_in_cache(vcpu, cr2, direct))
	/*
		emulation_type = 0;
	 * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
	 * optimistically try to just unprotect the page and let the processor
	 * re-execute the instruction that caused the page fault.  Do not allow
	 * retrying MMIO emulation, as it's not only pointless but could also
	 * cause us to enter an infinite loop because the processor will keep
	 * faulting on the non-existent MMIO address.
	 */
	if (!mmio_info_in_cache(vcpu, cr2, direct))
		emulation_type = EMULTYPE_ALLOW_RETRY;
emulate:
emulate:
	/*
	/*
	 * On AMD platforms, under certain conditions insn_len may be zero on #NPF.
	 * On AMD platforms, under certain conditions insn_len may be zero on #NPF.