
Commit cb9aaa30 authored by Xiao Guangrong, committed by Avi Kivity

KVM: do not release the error pfn



After commit a2766325, the error pfn is replaced by an error code,
so it need not be released anymore

[ The patch has been compile-tested for powerpc ]

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 6cede2e6
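For context, a minimal user-space sketch of the invariant this patch enforces: an error pfn is just a sentinel code with no refcounted page behind it, so a release is only legal for a real pfn. The encodings and names below are made up for illustration; the kernel's actual pfn_t, is_error_pfn() and KVM_PFN_ERR_* definitions differ.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t pfn_t;

/* illustrative encodings only, not the kernel's constants */
#define PFN_ERR_MASK		(0xfffULL << 52)
#define PFN_ERR_FAULT		(PFN_ERR_MASK)
#define PFN_ERR_HWPOISON	(PFN_ERR_MASK + 1)

static int is_error(pfn_t pfn)
{
	/* error pfns occupy a range no real pfn can reach */
	return (pfn & PFN_ERR_MASK) != 0;
}

static void release_pfn(pfn_t pfn)
{
	/* stand-in for kvm_release_pfn_clean(): drops a page reference */
	printf("release pfn 0x%llx\n", (unsigned long long)pfn);
}

int main(void)
{
	pfn_t good = 0x1234, bad = PFN_ERR_FAULT;

	if (!is_error(good))
		release_pfn(good);	/* real page: safe to release */
	if (!is_error(bad))
		release_pfn(bad);	/* never runs: bad is only an error code */
	return 0;
}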
arch/powerpc/kvm/e500_tlb.c  +0 −1

@@ -524,7 +524,6 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	if (is_error_pfn(pfn)) {
 		printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
 				(long)gfn);
-		kvm_release_pfn_clean(pfn);
 		return;
 	}

arch/x86/kvm/mmu.c  +3 −4

@@ -2496,6 +2496,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 				rmap_recycle(vcpu, sptep, gfn);
 		}
 	}
-	kvm_release_pfn_clean(pfn);
+
+	if (!is_error_pfn(pfn))
+		kvm_release_pfn_clean(pfn);
 }
 
@@ -2648,7 +2650,6 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
 
 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
 {
-	kvm_release_pfn_clean(pfn);
 	if (pfn == KVM_PFN_ERR_HWPOISON) {
 		kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
 		return 0;
@@ -3273,8 +3274,6 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 	if (!async)
 		return false; /* *pfn has correct page already */
 
-	kvm_release_pfn_clean(*pfn);
-
 	if (!prefault && can_do_async_pf(vcpu)) {
 		trace_kvm_try_async_get_page(gva, gfn);
 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
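Every mmu.c hunk applies the same rule: kvm_release_pfn_clean() may now only be handed a real pfn. A hypothetical kernel-style helper naming that pattern (not part of this patch) would be:

/* hypothetical helper, not in the tree */
static inline void kvm_release_pfn_if_valid(pfn_t pfn)
{
	if (!is_error_pfn(pfn))		/* error codes carry no page reference */
		kvm_release_pfn_clean(pfn);
}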
arch/x86/kvm/mmu_audit.c  +1 −3

@@ -116,10 +116,8 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 	gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
 	pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
 
-	if (is_error_pfn(pfn)) {
-		kvm_release_pfn_clean(pfn);
+	if (is_error_pfn(pfn))
 		return;
-	}
 
 	hpa =  pfn << PAGE_SHIFT;
 	if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
arch/x86/kvm/paging_tmpl.h  +2 −6

@@ -370,10 +370,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
 	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
 	pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
-	if (mmu_invalid_pfn(pfn)) {
-		kvm_release_pfn_clean(pfn);
+	if (mmu_invalid_pfn(pfn))
 		return;
-	}
 
 	/*
 	 * we call mmu_set_spte() with host_writable = true because that
@@ -448,10 +446,8 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 		gfn = gpte_to_gfn(gpte);
 		pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
 				      pte_access & ACC_WRITE_MASK);
-		if (mmu_invalid_pfn(pfn)) {
-			kvm_release_pfn_clean(pfn);
+		if (mmu_invalid_pfn(pfn))
 			break;
-		}
 
 		mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
 			     NULL, PT_PAGE_TABLE_LEVEL, gfn,
virt/kvm/iommu.c  +0 −1

@@ -107,7 +107,6 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 		 */
 		pfn = kvm_pin_pages(slot, gfn, page_size);
 		if (is_error_pfn(pfn)) {
-			kvm_release_pfn_clean(pfn);
 			gfn += 1;
 			continue;
 		}
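With the release dropped, the error branch in kvm_iommu_map_pages() simply skips the failed gfn; since kvm_pin_pages() returned an error code, no reference was ever taken, so there is nothing to undo. Sketched from the visible hunk (the surrounding loop is assumed):

	pfn = kvm_pin_pages(slot, gfn, page_size);
	if (is_error_pfn(pfn)) {
		/* nothing was pinned, nothing to release */
		gfn += 1;
		continue;
	}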