
Commit 9f1a122f authored by Xiao Guangrong, committed by Avi Kivity

KVM: MMU: allow more pages to become unsync when getting a shadow page



Allow more pages to become unsync when getting a shadow page: if we need to
create a new shadow page for a gfn, but the new page is not allowed to be
unsync (level > 1), we should first sync all of that gfn's unsync pages.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 9cf5cf5a
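
For readers not familiar with the MMU code, below is a minimal user-space sketch (not kernel code; all toy_* names are made up for illustration) of the rule this patch enforces: leaf shadow pages may stay unsync, but as soon as a non-leaf shadow page is created for a gfn, every unsync shadow page for that gfn must be synced, which is what the new kvm_sync_pages() call does in the diff below.

#include <stdbool.h>
#include <stdio.h>

#define MAX_PAGES 16

struct toy_page {
	unsigned long gfn;
	int level;      /* 1 == leaf, like PT_PAGE_TABLE_LEVEL */
	bool unsync;    /* guest modified the page without trapping */
	bool in_use;
};

static struct toy_page pages[MAX_PAGES];

/* Stand-in for kvm_sync_pages(): resync every unsync page of @gfn. */
static void toy_sync_pages(unsigned long gfn)
{
	for (int i = 0; i < MAX_PAGES; i++)
		if (pages[i].in_use && pages[i].gfn == gfn && pages[i].unsync) {
			pages[i].unsync = false;  /* "sync_page" succeeded */
			printf("synced page %d for gfn %lu\n", i, gfn);
		}
}

/* Stand-in for kvm_mmu_get_page(): create a shadow page for @gfn. */
static struct toy_page *toy_get_page(unsigned long gfn, int level)
{
	bool need_sync = false;
	int i, free = -1;

	for (i = 0; i < MAX_PAGES; i++) {
		if (!pages[i].in_use && free < 0)
			free = i;
		if (pages[i].in_use && pages[i].gfn == gfn && pages[i].unsync)
			need_sync = true;
	}

	pages[free] = (struct toy_page){ .gfn = gfn, .level = level, .in_use = true };

	/* The rule the patch enforces: only leaf pages may stay unsync, so
	 * creating a non-leaf page for this gfn forces a sync of them all. */
	if (level > 1 && need_sync)
		toy_sync_pages(gfn);

	return &pages[free];
}

int main(void)
{
	toy_get_page(5, 1)->unsync = true;  /* a leaf shadow page left unsync */
	toy_get_page(5, 2);                 /* non-leaf page: triggers the sync */
	return 0;
}

In the actual patch the check is `level > PT_PAGE_TABLE_LEVEL && need_sync` in kvm_mmu_get_page(), and kvm_sync_pages() walks the mmu_page_hash bucket for the gfn, zapping pages that fail to sync and flushing the TLB if anything was resynced.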
+37 −10
@@ -1220,6 +1220,35 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	return __kvm_sync_page(vcpu, sp, true);
 }
 
+/* @gfn should be write-protected at the call site */
+static void kvm_sync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
+{
+	struct hlist_head *bucket;
+	struct kvm_mmu_page *s;
+	struct hlist_node *node, *n;
+	unsigned index;
+	bool flush = false;
+
+	index = kvm_page_table_hashfn(gfn);
+	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
+	hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
+		if (s->gfn != gfn || !s->unsync || s->role.invalid)
+			continue;
+
+		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
+		if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
+			(vcpu->arch.mmu.sync_page(vcpu, s))) {
+			kvm_mmu_zap_page(vcpu->kvm, s);
+			continue;
+		}
+		kvm_unlink_unsync_page(vcpu->kvm, s);
+		flush = true;
+	}
+
+	if (flush)
+		kvm_mmu_flush_tlb(vcpu);
+}
+
 struct mmu_page_path {
 	struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
 	unsigned int idx[PT64_ROOT_LEVEL-1];
@@ -1318,8 +1347,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	unsigned index;
 	unsigned quadrant;
 	struct hlist_head *bucket;
-	struct kvm_mmu_page *sp, *unsync_sp = NULL;
+	struct kvm_mmu_page *sp;
 	struct hlist_node *node, *tmp;
+	bool need_sync = false;
 
 	role = vcpu->arch.mmu.base_role;
 	role.level = level;
@@ -1336,17 +1366,14 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
 		if (sp->gfn == gfn) {
-			if (sp->unsync)
-				unsync_sp = sp;
+			if (!need_sync && sp->unsync)
+				need_sync = true;
 
 			if (sp->role.word != role.word)
 				continue;
 
-			if (!direct && unsync_sp &&
-			      kvm_sync_page_transient(vcpu, unsync_sp)) {
-				unsync_sp = NULL;
+			if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
 				break;
-			}
 
 			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
 			if (sp->unsync_children) {
@@ -1358,9 +1385,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 			trace_kvm_mmu_get_page(sp, false);
 			return sp;
 		}
-	if (!direct && unsync_sp)
-		kvm_sync_page(vcpu, unsync_sp);
-
 	++vcpu->kvm->stat.mmu_cache_miss;
 	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
 	if (!sp)
@@ -1371,6 +1395,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	if (!direct) {
 		if (rmap_write_protect(vcpu->kvm, gfn))
 			kvm_flush_remote_tlbs(vcpu->kvm);
+		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
+			kvm_sync_pages(vcpu, gfn);
+
 		account_shadowed(vcpu->kvm, gfn);
 	}
 	if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)