
Commit 0959ffac authored by Joerg Roedel, committed by Avi Kivity

KVM: MMU: Don't track nested fault info in error-code



This patch moves the tracking of whether a page fault was
nested out of the error code and into a separate variable
in the fault struct.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 625831a3
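
For context, a minimal standalone sketch of what the change amounts to
(hypothetical names such as fault_old/fault_new; this is not kernel code):
before the patch, "the fault happened during a nested walk" rode along in
bit 31 of the page-fault error code and had to be masked off again before
injection; after the patch it is a separate bool next to the error code.

#include <stdbool.h>
#include <stdio.h>

#define PFERR_NESTED_MASK (1U << 31)  /* old in-band software bit */

struct fault_old { unsigned error_code; };               /* before */
struct fault_new { unsigned error_code; bool nested; };  /* after  */

int main(void)
{
	/* Old scheme: decode, then strip the flag from the error code. */
	struct fault_old o = { .error_code = 0x4 | PFERR_NESTED_MASK };
	bool nested = o.error_code & PFERR_NESTED_MASK;
	o.error_code &= ~PFERR_NESTED_MASK;

	/* New scheme: the error code is left untouched. */
	struct fault_new n = { .error_code = 0x4, .nested = true };

	printf("old: nested=%d error=%#x\n", nested, o.error_code);
	printf("new: nested=%d error=%#x\n", n.nested, n.error_code);
	return 0;
}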
arch/x86/include/asm/kvm_host.h  +1 −0
@@ -322,6 +322,7 @@ struct kvm_vcpu_arch {
 	struct {
 		u64      address;
 		unsigned error_code;
+		bool     nested;
 	} fault;

 	/* only needed in kvm_pv_mmu_op() path, but it's hot so
arch/x86/kvm/mmu.h  +0 −1
@@ -47,7 +47,6 @@
 #define PFERR_USER_MASK (1U << 2)
 #define PFERR_RSVD_MASK (1U << 3)
 #define PFERR_FETCH_MASK (1U << 4)
-#define PFERR_NESTED_MASK (1U << 31)

 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
arch/x86/kvm/x86.c  +4 −10
@@ -342,18 +342,12 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu)

 void kvm_propagate_fault(struct kvm_vcpu *vcpu)
 {
-	u32 nested, error;
-
-	error   = vcpu->arch.fault.error_code;
-	nested  = error &  PFERR_NESTED_MASK;
-	error   = error & ~PFERR_NESTED_MASK;
-
-	vcpu->arch.fault.error_code = error;
-
-	if (mmu_is_nested(vcpu) && !nested)
+	if (mmu_is_nested(vcpu) && !vcpu->arch.fault.nested)
 		vcpu->arch.nested_mmu.inject_page_fault(vcpu);
 	else
 		vcpu->arch.mmu.inject_page_fault(vcpu);
+
+	vcpu->arch.fault.nested = false;
 }

 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
@@ -3524,7 +3518,7 @@ static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
 	access |= PFERR_USER_MASK;
 	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &error);
 	if (t_gpa == UNMAPPED_GVA)
-		vcpu->arch.fault.error_code |= PFERR_NESTED_MASK;
+		vcpu->arch.fault.nested = true;

 	return t_gpa;
 }
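
To make the resulting control flow concrete, here is a hedged sketch with
hypothetical stand-ins for the vcpu state and the two injection callbacks
(not the kernel's types): translate_nested_gpa() now records the failed
nested walk in a bool, and kvm_propagate_fault() reads that bool to pick
an injection path, consuming the flag afterwards.

#include <stdbool.h>
#include <stdio.h>

struct vcpu {
	bool mmu_is_nested;   /* guest is running with a nested MMU */
	bool fault_nested;    /* fault hit while translating a nested gpa */
};

static void inject_via_nested_mmu(struct vcpu *v) { (void)v; puts("nested_mmu.inject_page_fault"); }
static void inject_via_mmu(struct vcpu *v)        { (void)v; puts("mmu.inject_page_fault"); }

/* Mirrors kvm_propagate_fault() after the patch: the decision reads a
 * dedicated bool instead of masking bit 31 out of the error code. */
static void propagate_fault(struct vcpu *v)
{
	if (v->mmu_is_nested && !v->fault_nested)
		inject_via_nested_mmu(v);
	else
		inject_via_mmu(v);

	v->fault_nested = false;   /* one-shot flag, reset for the next fault */
}

int main(void)
{
	struct vcpu v = { .mmu_is_nested = true, .fault_nested = true };
	propagate_fault(&v);   /* nested-walk fault -> mmu path */
	propagate_fault(&v);   /* flag consumed -> nested_mmu path */
	return 0;
}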