
Commit 332b207d authored by Xiao Guangrong, committed by Avi Kivity

KVM: MMU: optimize pte write path if don't have protected sp
Simply return from the kvm_mmu_pte_write() path if no shadow page is
write-protected; this avoids walking all shadow pages and taking
mmu-lock.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 96304217
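
The change is a counter-gated fast path: account_shadowed()/unaccount_shadowed() maintain a per-VM count of write-protected (indirect) shadow pages, and kvm_mmu_pte_write() returns before taking mmu-lock whenever that count is zero. A minimal userspace sketch of the same pattern follows; the names are hypothetical stand-ins, not the kernel implementation:

/*
 * Sketch of the counter-gated fast path (hypothetical names).
 * Writers bump a counter under a lock whenever a page becomes
 * write-protected; the hot path reads the counter locklessly and
 * bails out early when it is zero.
 *
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;
static int indirect_shadow_pages;	/* modified only under mmu_lock */

/* Force a single, non-cached read, like the kernel's ACCESS_ONCE(). */
#define READ_ONCE_INT(x) (*(volatile int *)&(x))

static void account_shadowed(void)
{
	pthread_mutex_lock(&mmu_lock);
	indirect_shadow_pages++;
	pthread_mutex_unlock(&mmu_lock);
}

static void unaccount_shadowed(void)
{
	pthread_mutex_lock(&mmu_lock);
	indirect_shadow_pages--;
	pthread_mutex_unlock(&mmu_lock);
}

static void pte_write(void)
{
	/*
	 * Fast path: if no page is write-protected there is nothing
	 * to do, so skip taking mmu_lock and walking shadow pages.
	 */
	if (!READ_ONCE_INT(indirect_shadow_pages))
		return;

	pthread_mutex_lock(&mmu_lock);
	/* ... walk shadow pages, zap/flush as needed ... */
	pthread_mutex_unlock(&mmu_lock);
}

int main(void)
{
	pte_write();		/* counter is 0: returns immediately */
	account_shadowed();
	pte_write();		/* counter is 1: takes the slow path */
	unaccount_shadowed();
	printf("indirect_shadow_pages = %d\n", indirect_shadow_pages);
	return 0;
}
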
arch/x86/include/asm/kvm_host.h (+1 −0)
@@ -441,6 +441,7 @@ struct kvm_arch {
 	unsigned int n_used_mmu_pages;
 	unsigned int n_requested_mmu_pages;
 	unsigned int n_max_mmu_pages;
+	unsigned int indirect_shadow_pages;
 	atomic_t invlpg_counter;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	/*
arch/x86/kvm/mmu.c (+9 −0)
@@ -498,6 +498,7 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 		linfo = lpage_info_slot(gfn, slot, i);
 		linfo->write_count += 1;
 	}
+	kvm->arch.indirect_shadow_pages++;
 }
 
 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
@@ -513,6 +514,7 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
 		linfo->write_count -= 1;
 		WARN_ON(linfo->write_count < 0);
 	}
+	kvm->arch.indirect_shadow_pages--;
 }
 
 static int has_wrprotected_page(struct kvm *kvm,
@@ -3233,6 +3235,13 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	int level, npte, invlpg_counter, r, flooded = 0;
 	bool remote_flush, local_flush, zap_page;
 
+	/*
+	 * If we don't have indirect shadow pages, it means no page is
+	 * write-protected, so we can exit simply.
+	 */
+	if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
+		return;
+
 	zap_page = remote_flush = local_flush = false;
 	offset = offset_in_page(gpa);
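
A note on the lockless check in the hunk above: the counter is only modified while mmu-lock is held (in account_shadowed()/unaccount_shadowed()), and ACCESS_ONCE() stops the compiler from caching, tearing, or re-reading the value. For reference, its definition in kernels of this era, before it was superseded by READ_ONCE()/WRITE_ONCE():

/* From <linux/compiler.h>: the volatile cast forces the compiler to
 * emit exactly one access to x instead of optimizing it away. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

Presumably the worst case for a stale non-zero read is one unnecessary trip through the slow path, which then takes mmu-lock and re-examines the shadow pages anyway.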