
Commit e9ee956e authored by Takuya Yoshikawa, committed by Paolo Bonzini

KVM: x86: MMU: Move handle_mmio_page_fault() call to kvm_mmu_page_fault()



Rather than placing a handle_mmio_page_fault() call in each
vcpu->arch.mmu.page_fault() handler, move it up into
kvm_mmu_page_fault(). This improves the code in three ways:

 - it avoids code duplication
 - it removes an extra error_code check from the path of
   kvm_arch_async_page_ready(), the other caller of
   vcpu->arch.mmu.page_fault()
 - it stops vcpu->arch.mmu.page_fault() from returning a mix of
   RET_MMIO_PF_* values and raw integer values

Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ded58749
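
To see the shape of the refactoring outside kernel context, here is a minimal, self-contained C sketch of the pattern the patch applies: the top-level dispatcher performs the special-case MMIO check once and maps its tri-state result, so the low-level handlers stay free of it and return plain integers. All names here are illustrative stand-ins for handle_mmio_page_fault(), the RET_MMIO_PF_* values, and the mmu.page_fault() handlers, not real KVM APIs; only PFERR_RSVD_MASK (bit 3 of the x86 page-fault error code) mirrors the kernel constant.

	#include <stdio.h>

	/* Illustrative stand-ins for RET_MMIO_PF_EMULATE/RETRY/INVALID. */
	enum mmio_pf_result {
		MMIO_PF_EMULATE,	/* instruction must be emulated */
		MMIO_PF_RETRY,		/* let the guest retry the access */
		MMIO_PF_INVALID,	/* not an MMIO fault; take the normal path */
	};

	#define PFERR_RSVD_MASK (1U << 3)	/* RSVD bit of the PF error code */

	/* Stand-in for handle_mmio_page_fault(). */
	static enum mmio_pf_result handle_mmio_fault(unsigned long addr)
	{
		return addr >= 0xfee00000UL ? MMIO_PF_EMULATE : MMIO_PF_INVALID;
	}

	/* Stand-in for a vcpu->arch.mmu.page_fault() handler: after the
	 * change it only handles ordinary faults and returns plain ints. */
	static int ordinary_page_fault(unsigned long addr, unsigned int error_code)
	{
		(void)addr;
		(void)error_code;
		return 1;	/* fault fixed, resume the guest */
	}

	/* Stand-in for kvm_mmu_page_fault(): the MMIO check lives here, once. */
	static int top_level_page_fault(unsigned long addr, unsigned int error_code)
	{
		if (error_code & PFERR_RSVD_MASK) {
			switch (handle_mmio_fault(addr)) {
			case MMIO_PF_EMULATE:
				return 2;	/* caller should emulate */
			case MMIO_PF_RETRY:
				return 1;	/* re-enter the guest */
			case MMIO_PF_INVALID:
				break;		/* fall through to the normal path */
			}
		}
		return ordinary_page_fault(addr, error_code & ~PFERR_RSVD_MASK);
	}

	int main(void)
	{
		printf("mmio fault  -> %d\n",
		       top_level_page_fault(0xfee00000UL, PFERR_RSVD_MASK));
		printf("plain fault -> %d\n", top_level_page_fault(0x1000UL, 0));
		return 0;
	}

The design point this models is the last bullet above: once the dispatcher translates the tri-state MMIO result itself, the low-level handlers no longer need to return RET_MMIO_PF_* values at all.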
arch/x86/kvm/mmu.c +16 −23
@@ -3370,13 +3370,6 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 
 	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
 
-	if (unlikely(error_code & PFERR_RSVD_MASK)) {
-		r = handle_mmio_page_fault(vcpu, gva, true);
-
-		if (likely(r != RET_MMIO_PF_INVALID))
-			return r;
-	}
-
 	r = mmu_topup_memory_caches(vcpu);
 	if (r)
 		return r;
@@ -3460,13 +3453,6 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 
 	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
-	if (unlikely(error_code & PFERR_RSVD_MASK)) {
-		r = handle_mmio_page_fault(vcpu, gpa, true);
-
-		if (likely(r != RET_MMIO_PF_INVALID))
-			return r;
-	}
-
 	r = mmu_topup_memory_caches(vcpu);
 	if (r)
 		return r;
@@ -4361,18 +4347,27 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
 	enum emulation_result er;
 	bool direct = vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu);
 
-	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
-	if (r < 0)
-		goto out;
-
-	if (!r) {
-		r = 1;
-		goto out;
+	if (unlikely(error_code & PFERR_RSVD_MASK)) {
+		r = handle_mmio_page_fault(vcpu, cr2, direct);
+		if (r == RET_MMIO_PF_EMULATE) {
+			emulation_type = 0;
+			goto emulate;
+		}
+		if (r == RET_MMIO_PF_RETRY)
+			return 1;
+		if (r < 0)
+			return r;
 	}
 
+	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
+	if (r < 0)
+		return r;
+	if (!r)
+		return 1;
+
 	if (mmio_info_in_cache(vcpu, cr2, direct))
 		emulation_type = 0;
-
+emulate:
 	er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
 
 	switch (er) {
@@ -4386,8 +4381,6 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
 	default:
 		BUG();
 	}
-out:
-	return r;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

arch/x86/kvm/paging_tmpl.h +6 −13
@@ -702,22 +702,15 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 
 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 
-	if (unlikely(error_code & PFERR_RSVD_MASK)) {
-		r = handle_mmio_page_fault(vcpu, addr, mmu_is_nested(vcpu));
-		if (likely(r != RET_MMIO_PF_INVALID))
-			return r;
-
-		/*
-		 * page fault with PFEC.RSVD  = 1 is caused by shadow
-		 * page fault, should not be used to walk guest page
-		 * table.
-		 */
-		error_code &= ~PFERR_RSVD_MASK;
-	};
-
 	r = mmu_topup_memory_caches(vcpu);
 	if (r)
 		return r;
 
+	/*
+	 * If PFEC.RSVD is set, this is a shadow page fault.
+	 * The bit needs to be cleared before walking guest page tables.
+	 */
+	error_code &= ~PFERR_RSVD_MASK;
+
 	/*
	 * Look up the guest pte for the faulting address.
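
For context on the rewritten comment: a fault arriving with PFEC.RSVD set was produced by the host's shadow page tables, so the bit carries no meaning for the guest's own page tables and must be masked off before the guest walk. Below is a minimal sketch of that masking, assuming only that RSVD is bit 3 of the x86 page-fault error code; the toy walk_guest_page_tables() function is hypothetical, while the constant mirrors the kernel's PFERR_RSVD_MASK.

	#include <assert.h>

	#define PFERR_RSVD_MASK (1U << 3)	/* RSVD is bit 3 of the PF error code */

	/* Toy guest page-table walk: the guest's own tables never set
	 * reserved bits, so a leaked RSVD bit here is a caller bug. */
	static void walk_guest_page_tables(unsigned int error_code)
	{
		assert(!(error_code & PFERR_RSVD_MASK));
		/* ... the walk would proceed here ... */
	}

	int main(void)
	{
		unsigned int error_code = 0x7 | PFERR_RSVD_MASK;	/* P|W|U plus RSVD */

		/* The same clearing step the patch keeps in FNAME(page_fault). */
		error_code &= ~PFERR_RSVD_MASK;
		walk_guest_page_tables(error_code);
		return 0;
	}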