
Commit 98bba238 authored by Takuya Yoshikawa, committed by Paolo Bonzini

KVM: x86: MMU: Move parent_pte handling from kvm_mmu_get_page() to link_shadow_page()



Every time kvm_mmu_get_page() is called with a non-NULL parent_pte
argument, link_shadow_page() follows it to set the parent entry, so
that the new mapping points to the returned page table.
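
The call pattern in question, condensed for illustration from the
__direct_map() hunk below (not verbatim kernel code):

	/* Before this patch: the parent PTE is passed twice, once as the
	 * parent_pte argument of kvm_mmu_get_page() and once as the sptep
	 * that link_shadow_page() writes through. */
	sp = kvm_mmu_get_page(vcpu, gfn, iterator.addr, iterator.level - 1,
			      1, ACC_ALL, iterator.sptep);
	link_shadow_page(iterator.sptep, sp);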

Moving parent_pte handling there cleans up the code, because
parent_pte is passed to kvm_mmu_get_page() only for mark_unsync() and
mmu_page_add_parent_pte().
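
After the move, link_shadow_page() handles the parent bookkeeping
itself; a condensed sketch of the function as the hunks below leave it
(body abridged with ...):

	static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
				     struct kvm_mmu_page *sp)
	{
		...
		mmu_spte_set(sptep, spte);
		mmu_page_add_parent_pte(vcpu, sp, sptep);
		if (sp->unsync_children || sp->unsync)
			mark_unsync(sptep);
	}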

In addition, the patch avoids calling mark_unsync() for parents in the
sp->parent_ptes chain other than the newly added parent_pte, because
those entries have been in place since before the current page fault
handling started.
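
For contrast, a condensed sketch (not verbatim kernel code) of the two
behaviors, assuming kvm_mmu_mark_parents_unsync() walks every entry in
sp->parent_ptes:

	/* Old: mark every parent in the sp->parent_ptes chain, including
	 * entries that earlier faults have already handled. */
	kvm_mmu_mark_parents_unsync(sp);

	/* New: mark only the parent entry that this fault just added. */
	if (sp->unsync_children || sp->unsync)
		mark_unsync(sptep);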

Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Cc: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 47005792
arch/x86/kvm/mmu.c +9 −14
@@ -2119,12 +2119,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
 			break;
 
-		mmu_page_add_parent_pte(vcpu, sp, parent_pte);
-		if (sp->unsync_children) {
+		if (sp->unsync_children)
 			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
-			kvm_mmu_mark_parents_unsync(sp);
-		} else if (sp->unsync)
-			kvm_mmu_mark_parents_unsync(sp);
 
 		__clear_sp_write_flooding_count(sp);
 		trace_kvm_mmu_get_page(sp, false);
@@ -2135,8 +2131,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 
 	sp = kvm_mmu_alloc_page(vcpu, direct);
 
-	mmu_page_add_parent_pte(vcpu, sp, parent_pte);
-
 	sp->gfn = gfn;
 	sp->role = role;
 	hlist_add_head(&sp->hash_link,
@@ -2204,7 +2198,8 @@ static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
 	return __shadow_walk_next(iterator, *iterator->sptep);
 }
 
-static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
+static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
+			     struct kvm_mmu_page *sp)
 {
 	u64 spte;
 
@@ -2215,6 +2210,11 @@ static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
 	       shadow_user_mask | shadow_x_mask | shadow_accessed_mask;
 
 	mmu_spte_set(sptep, spte);
+
+	mmu_page_add_parent_pte(vcpu, sp, sptep);
+
+	if (sp->unsync_children || sp->unsync)
+		mark_unsync(sptep);
 }
 
 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
@@ -2273,11 +2273,6 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
 		mmu_page_zap_pte(kvm, sp, sp->spt + i);
 }
 
-static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
-{
-	mmu_page_remove_parent_pte(sp, parent_pte);
-}
-
 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	u64 *sptep;
@@ -2743,7 +2738,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable,
 					      iterator.level - 1,
 					      1, ACC_ALL, iterator.sptep);
 
-			link_shadow_page(iterator.sptep, sp);
+			link_shadow_page(vcpu, iterator.sptep, sp);
 		}
 	}
 	return emulate;
arch/x86/kvm/paging_tmpl.h +2 −4
@@ -598,7 +598,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			goto out_gpte_changed;
 
 		if (sp)
-			link_shadow_page(it.sptep, sp);
+			link_shadow_page(vcpu, it.sptep, sp);
 	}
 
 	for (;
@@ -618,7 +618,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 
 		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
 				      true, direct_access, it.sptep);
-		link_shadow_page(it.sptep, sp);
+		link_shadow_page(vcpu, it.sptep, sp);
 	}
 
 	clear_sp_write_flooding_count(it.sptep);
@@ -629,8 +629,6 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	return emulate;
 
 out_gpte_changed:
-	if (sp)
-		kvm_mmu_put_page(sp, it.sptep);
 	kvm_release_pfn_clean(pfn);
 	return 0;
 }