
Commit 26eeb53c authored by Wanpeng Li, committed by Paolo Bonzini

KVM: MMU: Bail out immediately if there is no available mmu page



Bail out immediately if there is no available mmu page to allocate.

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 42bcbebf
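The patch turns make_mmu_pages_available() from a void function into one that returns 0 on success and -ENOSPC when no shadow page can be freed up, and every caller now checks that result and bails out (releasing mmu_lock on its error path) rather than proceeding with no page available to allocate. The standalone sketch below mirrors that call-site pattern outside the kernel; available_pages, reclaim_one_page() and handle_fault() are illustrative stand-ins, not kernel APIs.

#include <errno.h>
#include <stdio.h>

#define MIN_FREE_PAGES	5	/* stand-in for KVM_MIN_FREE_MMU_PAGES */
#define REFILL_PAGES	25	/* stand-in for KVM_REFILL_PAGES */

static int available_pages;	/* illustrative page pool */

/* Stand-in for prepare_zap_oldest_mmu_page(): pretend nothing is reclaimable. */
static int reclaim_one_page(void)
{
	return 0;
}

/* Mirrors the patched make_mmu_pages_available(): 0 on success, -ENOSPC on failure. */
static int make_pages_available(void)
{
	if (available_pages >= MIN_FREE_PAGES)
		return 0;

	while (available_pages < REFILL_PAGES) {
		if (!reclaim_one_page())
			break;
		available_pages++;
	}

	if (!available_pages)
		return -ENOSPC;
	return 0;
}

/* Mirrors a patched call site such as nonpaging_map() or tdp_page_fault(). */
static int handle_fault(void)
{
	int r = -ENOSPC;

	/* mmu_lock would be taken here */
	if (make_pages_available() < 0)
		goto out_unlock;	/* bail out instead of mapping with no page */
	/* ... shadow page allocation and mapping would happen here ... */
	r = 0;
out_unlock:
	/* mmu_lock would be released here */
	return r;
}

int main(void)
{
	printf("handle_fault() -> %d\n", handle_fault());
	return 0;
}

Compiled as-is the sketch has nothing to reclaim, so make_pages_available() returns -ENOSPC and handle_fault() takes the early-unlock path, which is exactly the situation the patched fault handlers now guard against.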
arch/x86/kvm/mmu.c +27 −9
@@ -3257,7 +3257,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 
 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 			 gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable);
-static void make_mmu_pages_available(struct kvm_vcpu *vcpu);
+static int make_mmu_pages_available(struct kvm_vcpu *vcpu);
 
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 			 gfn_t gfn, bool prefault)
@@ -3297,7 +3297,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
-	make_mmu_pages_available(vcpu);
+	if (make_mmu_pages_available(vcpu) < 0)
+		goto out_unlock;
 	if (likely(!force_pt_level))
 		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
 	r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault);
@@ -3376,7 +3377,10 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 
 	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
 		spin_lock(&vcpu->kvm->mmu_lock);
-		make_mmu_pages_available(vcpu);
+		if(make_mmu_pages_available(vcpu) < 0) {
+			spin_unlock(&vcpu->kvm->mmu_lock);
+			return 1;
+		}
 		sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL, 1, ACC_ALL);
 		++sp->root_count;
 		spin_unlock(&vcpu->kvm->mmu_lock);
@@ -3387,7 +3391,10 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 
 			MMU_WARN_ON(VALID_PAGE(root));
 			spin_lock(&vcpu->kvm->mmu_lock);
-			make_mmu_pages_available(vcpu);
+			if (make_mmu_pages_available(vcpu) < 0) {
+				spin_unlock(&vcpu->kvm->mmu_lock);
+				return 1;
+			}
 			sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
 					i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
 			root = __pa(sp->spt);
@@ -3424,7 +3431,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		MMU_WARN_ON(VALID_PAGE(root));
 
 		spin_lock(&vcpu->kvm->mmu_lock);
-		make_mmu_pages_available(vcpu);
+		if (make_mmu_pages_available(vcpu) < 0) {
+			spin_unlock(&vcpu->kvm->mmu_lock);
+			return 1;
+		}
 		sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
 				      0, ACC_ALL);
 		root = __pa(sp->spt);
@@ -3458,7 +3468,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 				return 1;
 		}
 		spin_lock(&vcpu->kvm->mmu_lock);
-		make_mmu_pages_available(vcpu);
+		if (make_mmu_pages_available(vcpu) < 0) {
+			spin_unlock(&vcpu->kvm->mmu_lock);
+			return 1;
+		}
 		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
 				      0, ACC_ALL);
 		root = __pa(sp->spt);
@@ -3867,7 +3880,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
-	make_mmu_pages_available(vcpu);
+	if (make_mmu_pages_available(vcpu) < 0)
+		goto out_unlock;
 	if (likely(!force_pt_level))
 		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
 	r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault);
@@ -4786,12 +4800,12 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
 
-static void make_mmu_pages_available(struct kvm_vcpu *vcpu)
+static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
 {
 	LIST_HEAD(invalid_list);
 
 	if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES))
-		return;
+		return 0;
 
 	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
 		if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
@@ -4800,6 +4814,10 @@ static void make_mmu_pages_available(struct kvm_vcpu *vcpu)
 		++vcpu->kvm->stat.mmu_recycled;
 	}
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+
+	if (!kvm_mmu_available_pages(vcpu->kvm))
+		return -ENOSPC;
+	return 0;
 }
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
arch/x86/kvm/paging_tmpl.h +2 −1
@@ -819,7 +819,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 		goto out_unlock;
 
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
-	make_mmu_pages_available(vcpu);
+	if (make_mmu_pages_available(vcpu) < 0)
+		goto out_unlock;
 	if (!force_pt_level)
 		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
 	r = FNAME(fetch)(vcpu, addr, &walker, write_fault,