Commit e08d26f0 authored by Paolo Bonzini, committed by Radim Krčmář

KVM: x86: simplify ept_misconfig

Calling handle_mmio_page_fault() has been unnecessary since commit
e9ee956e ("KVM: x86: MMU: Move handle_mmio_page_fault() call to
kvm_mmu_page_fault()", 2016-02-22).

handle_mmio_page_fault() can now be made static.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
parent 076b925d
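
To make the effect concrete, here is a minimal sketch of handle_ept_misconfig() after this patch, reconstructed from the vmx.c hunk below rather than quoted from the tree; the fast-MMIO shortcut and the final userspace-exit reporting are elided:

/* Sketch only: reconstructed from the vmx.c hunk below, not verbatim. */
static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
{
	gpa_t gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
	int ret;

	/* ... fast-MMIO shortcut elided ... */

	vcpu->arch.gpa_available = true;
	/*
	 * PFERR_RSVD_MASK steers kvm_mmu_page_fault() into its MMIO
	 * branch, so the RET_MMIO_PF_* convention stays private to the
	 * MMU and handle_mmio_page_fault() can become static.
	 */
	ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
	if (ret >= 0)
		return ret;	/* 1: resume guest, 0: exit to userspace */

	/* ret < 0 means RET_MMIO_PF_BUG: a genuine EPT misconfig. */
	WARN_ON(1);
	/* ... report the unexpected exit to userspace (elided) ... */
	return 0;
}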
arch/x86/kvm/mmu.c (+18 −1)
@@ -3648,7 +3648,23 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 	return reserved;
 }
 
-int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+/*
+ * Return values of handle_mmio_page_fault:
+ * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
+ *			directly.
+ * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page
+ *			fault path update the mmio spte.
+ * RET_MMIO_PF_RETRY: let CPU fault again on the address.
+ * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed).
+ */
+enum {
+	RET_MMIO_PF_EMULATE = 1,
+	RET_MMIO_PF_INVALID = 2,
+	RET_MMIO_PF_RETRY = 0,
+	RET_MMIO_PF_BUG = -1
+};
+
+static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 {
 	u64 spte;
 	bool reserved;
@@ -4837,6 +4853,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
 			return 1;
 		if (r < 0)
 			return r;
+		/* Must be RET_MMIO_PF_INVALID.  */
 	}
 
 	r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
arch/x86/kvm/mmu.h (+0 −17)
@@ -56,23 +56,6 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value);
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
 
-/*
- * Return values of handle_mmio_page_fault:
- * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
- *			directly.
- * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page
- *			fault path update the mmio spte.
- * RET_MMIO_PF_RETRY: let CPU fault again on the address.
- * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed).
- */
-enum {
-	RET_MMIO_PF_EMULATE = 1,
-	RET_MMIO_PF_INVALID = 2,
-	RET_MMIO_PF_RETRY = 0,
-	RET_MMIO_PF_BUG = -1
-};
-
-int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 			     bool accessed_dirty);
arch/x86/kvm/vmx.c (+3 −10)
@@ -6410,17 +6410,10 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 		return kvm_skip_emulated_instruction(vcpu);
 	}
 
-	ret = handle_mmio_page_fault(vcpu, gpa, true);
 	vcpu->arch.gpa_available = true;
-	if (likely(ret == RET_MMIO_PF_EMULATE))
-		return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
-					      EMULATE_DONE;
-
-	if (unlikely(ret == RET_MMIO_PF_INVALID))
-		return kvm_mmu_page_fault(vcpu, gpa, 0, NULL, 0);
-
-	if (unlikely(ret == RET_MMIO_PF_RETRY))
-		return 1;
+	ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
+	if (ret >= 0)
+		return ret;
 
 	/* It is the real ept misconfig */
 	WARN_ON(1);
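
On the caller side, checking ret >= 0 suffices because kvm_mmu_page_fault() already folds the RET_MMIO_PF_* values into the usual exit-handler return convention, as the context lines of the mmu.c hunk above show. A sketch of that dispatch, assembled from the hunk context; the EMULATE branch is an assumption of this sketch and is not visible in the diff:

	/* MMIO dispatch inside kvm_mmu_page_fault(); sketch assembled
	 * from the mmu.c hunk above, EMULATE branch assumed. */
	if (unlikely(error_code & PFERR_RSVD_MASK)) {
		r = handle_mmio_page_fault(vcpu, cr2, direct);
		if (r == RET_MMIO_PF_EMULATE)
			goto emulate;	/* assumed: emulate the MMIO access */
		if (r == RET_MMIO_PF_RETRY)
			return 1;	/* re-enter the guest and fault again */
		if (r < 0)
			return r;	/* RET_MMIO_PF_BUG propagates as an error */
		/* Must be RET_MMIO_PF_INVALID: fall through and let the
		 * regular fault path update the mmio spte. */
	}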