
Commit d7c55201 authored by Xiao Guangrong, committed by Avi Kivity

KVM: MMU: abstract some functions to handle fault pfn



Introduce handle_abnormal_pfn() to handle fault pfns on the page fault path,
and mmu_invalid_pfn() to handle fault pfns on the prefetch path.

This is preparatory work for MMIO page fault support.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent fce92dce
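
Review note: the shape of the refactoring is easier to see outside the diff. Below is a minimal, self-contained sketch of the pattern this commit introduces, reduced to a user-space C program. pfn_t here is a plain integer and the PFN_* constants are invented stand-ins for the kernel's reserved error pfns (the real is_invalid_pfn()/is_noslot_pfn() predicates come from earlier patches in this series); only the control flow should be read as matching the patch.

/*
 * Sketch of the fault-pfn classification this commit abstracts.
 * Build with: cc -o sketch sketch.c
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long pfn_t;

#define PFN_HWPOISON ((pfn_t)-1UL)	/* backing page is hw-poisoned (stand-in) */
#define PFN_FAULT    ((pfn_t)-2UL)	/* host could not resolve the page (stand-in) */
#define PFN_NOSLOT   ((pfn_t)-3UL)	/* gfn hits no memslot: MMIO candidate (stand-in) */

static bool is_invalid_pfn(pfn_t pfn)
{
	return pfn == PFN_HWPOISON || pfn == PFN_FAULT;
}

static bool is_noslot_pfn(pfn_t pfn)
{
	return pfn == PFN_NOSLOT;
}

/* Prefetch path: any abnormal pfn just aborts the speculative map. */
static bool mmu_invalid_pfn(pfn_t pfn)
{
	return is_invalid_pfn(pfn) || is_noslot_pfn(pfn);
}

/*
 * Page-fault path: abnormal pfns must be told apart. An invalid pfn is a
 * real error (the kernel additionally sends SIGBUS for hwpoison before
 * returning); a no-slot pfn is remembered as a possible MMIO access.
 * Returns true when the pfn was abnormal and *ret_val holds the answer.
 */
static bool handle_abnormal_pfn(pfn_t pfn, int *ret_val)
{
	if (is_invalid_pfn(pfn)) {
		*ret_val = -14;		/* -EFAULT */
		return true;
	}
	if (is_noslot_pfn(pfn)) {
		*ret_val = 1;		/* caller should emulate (MMIO) */
		return true;
	}
	return false;			/* normal pfn: go map it */
}

int main(void)
{
	const pfn_t samples[] = { 0x1234, PFN_FAULT, PFN_NOSLOT };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		int r = 0;
		bool abnormal = handle_abnormal_pfn(samples[i], &r);

		printf("pfn %#lx: prefetch-bail=%d fault-abnormal=%d r=%d\n",
		       samples[i], mmu_invalid_pfn(samples[i]), abnormal, r);
	}
	return 0;
}

The point of the split is visible in main(): the prefetch predicate collapses every abnormal pfn into a single "bail out" answer, while the fault-path helper also decides what the caller should do next.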
arch/x86/kvm/mmu.c  +35 −12
@@ -2221,18 +2221,15 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
 	send_sig_info(SIGBUS, &info, tsk);
 }
 
-static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gva_t gva,
-			       unsigned access, gfn_t gfn, pfn_t pfn)
+static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
 {
 	kvm_release_pfn_clean(pfn);
 	if (is_hwpoison_pfn(pfn)) {
 		kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
 		return 0;
-	} else if (is_fault_pfn(pfn))
-		return -EFAULT;
+	}
 
-	vcpu_cache_mmio_info(vcpu, gva, gfn, access);
-	return 1;
+	return -EFAULT;
 }
 
 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
@@ -2277,6 +2274,33 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 	}
 }
 
+static bool mmu_invalid_pfn(pfn_t pfn)
+{
+	return unlikely(is_invalid_pfn(pfn) || is_noslot_pfn(pfn));
+}
+
+static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+				pfn_t pfn, unsigned access, int *ret_val)
+{
+	bool ret = true;
+
+	/* The pfn is invalid, report the error! */
+	if (unlikely(is_invalid_pfn(pfn))) {
+		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
+		goto exit;
+	}
+
+	if (unlikely(is_noslot_pfn(pfn))) {
+		vcpu_cache_mmio_info(vcpu, gva, gfn, access);
+		*ret_val = 1;
+		goto exit;
+	}
+
+	ret = false;
+exit:
+	return ret;
+}
+
 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 			 gva_t gva, pfn_t *pfn, bool write, bool *writable);
 
@@ -2311,9 +2335,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
 	if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
 		return 0;
 
-	/* mmio */
-	if (is_error_pfn(pfn))
-		return kvm_handle_bad_page(vcpu, v, ACC_ALL, gfn, pfn);
+	if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
+		return r;
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
@@ -2685,9 +2708,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
 		return 0;
 
-	/* mmio */
-	if (is_error_pfn(pfn))
-		return kvm_handle_bad_page(vcpu, 0, 0, gfn, pfn);
+	if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
+		return r;
+
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
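
A note on the return-value contract visible in the two mmu.c call sites above: whatever handle_abnormal_pfn() stores in r is returned unchanged by nonpaging_map() and tdp_page_fault(). Reading just this diff, that gives three outcomes: a negative errno (-EFAULT) for a genuinely bad pfn, 1 when the gfn has no memslot and the access should be treated as MMIO, and 0 when kvm_handle_bad_page() has already dealt with the fault (the hwpoison case, after queueing SIGBUS). A small sketch of a dispatcher on that contract; dispatch_fault() and the outcome names are illustrative, not kernel identifiers:

#include <stdio.h>

/* Illustrative names for the three outcomes observed in the diff. */
enum fault_outcome {
	OUTCOME_HANDLED = 0,	/* e.g. hwpoison: SIGBUS queued, nothing to map */
	OUTCOME_EMULATE = 1,	/* no memslot: treat the access as MMIO */
	/* negative values are errnos, e.g. -EFAULT (-14) */
};

/* Hypothetical dispatcher playing the role of the generic fault handler. */
static const char *dispatch_fault(int r)
{
	if (r < 0)
		return "propagate the error";
	if (r == OUTCOME_EMULATE)
		return "emulate the faulting access";
	return "fault handled, nothing to emulate";
}

int main(void)
{
	const int results[] = { OUTCOME_HANDLED, OUTCOME_EMULATE, -14 };

	for (unsigned i = 0; i < 3; i++)
		printf("r = %3d -> %s\n", results[i], dispatch_fault(results[i]));
	return 0;
}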
arch/x86/kvm/paging_tmpl.h  +6 −6
@@ -367,7 +367,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
 	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
 	pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
-	if (is_error_pfn(pfn)) {
+	if (mmu_invalid_pfn(pfn)) {
 		kvm_release_pfn_clean(pfn);
 		return;
 	}
@@ -445,7 +445,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 		gfn = gpte_to_gfn(gpte);
 		pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
 				      pte_access & ACC_WRITE_MASK);
-		if (is_error_pfn(pfn)) {
+		if (mmu_invalid_pfn(pfn)) {
 			kvm_release_pfn_clean(pfn);
 			break;
 		}
@@ -615,10 +615,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 			 &map_writable))
 		return 0;
 
-	/* mmio */
-	if (is_error_pfn(pfn))
-		return kvm_handle_bad_page(vcpu, mmu_is_nested(vcpu) ? 0 :
-				      addr, walker.pte_access, walker.gfn, pfn);
+	if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
+				walker.gfn, pfn, walker.pte_access, &r))
+		return r;
+
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
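
Finally, the asymmetry between the two files is worth spelling out: the paging_tmpl.h prefetch sites, FNAME(update_pte) and FNAME(pte_prefetch), switch to the single combined predicate mmu_invalid_pfn(), because a speculative mapping can simply be abandoned no matter why the pfn is abnormal; only the real fault paths need the finer split that handle_abnormal_pfn() provides. A minimal sketch of that prefetch behavior, reusing the stand-in pfn encoding from the first sketch (the real code also drops its pfn reference via kvm_release_pfn_clean() before bailing):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long pfn_t;

/* Stand-in predicates; see the first sketch for the encoding. */
static bool is_invalid_pfn(pfn_t pfn) { return pfn >= (pfn_t)-2UL; }
static bool is_noslot_pfn(pfn_t pfn)  { return pfn == (pfn_t)-3UL; }

/* One combined check is enough on the prefetch path. */
static bool mmu_invalid_pfn(pfn_t pfn)
{
	return is_invalid_pfn(pfn) || is_noslot_pfn(pfn);
}

/*
 * Speculatively map a run of pfns, stopping at the first abnormal one,
 * as FNAME(pte_prefetch) does. Prefetch is best-effort: an abnormal pfn
 * is not an error here; the real fault path will classify it later.
 */
static int prefetch_pfns(const pfn_t *pfns, int n)
{
	int mapped = 0;

	for (int i = 0; i < n; i++) {
		if (mmu_invalid_pfn(pfns[i]))
			break;		/* kernel: kvm_release_pfn_clean() + break */
		mapped++;		/* kernel: install the speculative spte */
	}
	return mapped;
}

int main(void)
{
	const pfn_t pfns[] = { 0x10, 0x11, (pfn_t)-3UL /* no slot */, 0x12 };

	printf("prefetched %d of 4 pfns\n", prefetch_pfns(pfns, 4));
	return 0;
}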