
Commit 450869d6 authored by Paolo Bonzini

KVM: x86: merge handle_mmio_page_fault and handle_mmio_page_fault_common



They are exactly the same, except that handle_mmio_page_fault
has an unused argument and a call to WARN_ON.  Remove the unused
argument from the callers, and move the warning to (the former)
handle_mmio_page_fault_common.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a3eaa864
arch/x86/kvm/mmu.c  +5 −15
@@ -3359,7 +3359,7 @@ exit:
 	return reserved;
 }
 
-int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 {
 	u64 spte;
 	bool reserved;
@@ -3368,7 +3368,7 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 		return RET_MMIO_PF_EMULATE;
 
 	reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
-	if (unlikely(reserved))
+	if (WARN_ON(reserved))
 		return RET_MMIO_PF_BUG;
 
 	if (is_mmio_spte(spte)) {
@@ -3392,17 +3392,7 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 	 */
 	return RET_MMIO_PF_RETRY;
 }
-EXPORT_SYMBOL_GPL(handle_mmio_page_fault_common);
-
-static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr,
-				  u32 error_code, bool direct)
-{
-	int ret;
-
-	ret = handle_mmio_page_fault_common(vcpu, addr, direct);
-	WARN_ON(ret == RET_MMIO_PF_BUG);
-	return ret;
-}
+EXPORT_SYMBOL_GPL(handle_mmio_page_fault);
 
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 				u32 error_code, bool prefault)
@@ -3413,7 +3403,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
 
 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
-		r = handle_mmio_page_fault(vcpu, gva, error_code, true);
+		r = handle_mmio_page_fault(vcpu, gva, true);
 
 		if (likely(r != RET_MMIO_PF_INVALID))
 			return r;
@@ -3503,7 +3493,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
-		r = handle_mmio_page_fault(vcpu, gpa, error_code, true);
+		r = handle_mmio_page_fault(vcpu, gpa, true);
 
 		if (likely(r != RET_MMIO_PF_INVALID))
 			return r;
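The if (WARN_ON(reserved)) form introduced above works because the kernel's WARN_ON() macro evaluates to the truth value of its argument, so the warning and the early return collapse into a single test. A minimal userspace sketch of the same idiom, assuming GCC/Clang statement expressions; the WARN_ON below is a stand-in, not the kernel implementation:

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for the kernel macro: print a warning, then evaluate to
 * the condition itself so it can sit directly inside an if (). */
#define WARN_ON(cond) ({						\
	bool __warn = (cond);						\
	if (__warn)							\
		fprintf(stderr, "WARNING at %s:%d\n", __FILE__, __LINE__); \
	__warn;								\
})

static int check_spte(bool reserved)
{
	/* Same shape as the patched code: warn and bail out in one step. */
	if (WARN_ON(reserved))
		return -1;	/* stands in for RET_MMIO_PF_BUG */
	return 0;
}

int main(void)
{
	printf("clean: %d\n", check_spte(false));	/* no warning, returns 0 */
	printf("buggy: %d\n", check_spte(true));	/* warning on stderr, returns -1 */
	return 0;
}

Folding the WARN into the callee also means the diagnostic fires at the point where the bad spte walk is detected, rather than one frame up in a wrapper.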
arch/x86/kvm/mmu.h  +3 −3
@@ -56,13 +56,13 @@ void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
 
 /*
- * Return values of handle_mmio_page_fault_common:
+ * Return values of handle_mmio_page_fault:
  * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
  *			directly.
  * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page
  *			fault path update the mmio spte.
  * RET_MMIO_PF_RETRY: let CPU fault again on the address.
- * RET_MMIO_PF_BUG: bug is detected.
+ * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed).
  */
 enum {
 	RET_MMIO_PF_EMULATE = 1,
@@ -71,7 +71,7 @@ enum {
 	RET_MMIO_PF_BUG = -1
 };
 
-int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
+int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
 
arch/x86/kvm/paging_tmpl.h  +1 −2
@@ -705,8 +705,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 
 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
-		r = handle_mmio_page_fault(vcpu, addr, error_code,
-					      mmu_is_nested(vcpu));
+		r = handle_mmio_page_fault(vcpu, addr, mmu_is_nested(vcpu));
 		if (likely(r != RET_MMIO_PF_INVALID))
 			return r;
 
arch/x86/kvm/vmx.c  +1 −1
@@ -5908,7 +5908,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 		return 1;
 	}
 
-	ret = handle_mmio_page_fault_common(vcpu, gpa, true);
+	ret = handle_mmio_page_fault(vcpu, gpa, true);
 	if (likely(ret == RET_MMIO_PF_EMULATE))
 		return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
 					      EMULATE_DONE;