Commit 7c562522 authored by Xiao Guangrong, committed by Avi Kivity

KVM: MMU: remove mmu_seq verification on pte update path

The mmu_seq verification can be removed since we now get the pfn under the protection of mmu_lock.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent a0c0ab2f
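
For context, the verification being deleted is the standard mmu_notifier retry pattern: snapshot kvm->mmu_notifier_seq, look up the pfn, then recheck the sequence under mmu_lock before installing the spte, because an MMU-notifier invalidation may have run while the lock was not held. Below is a minimal sketch of that pattern, not code from this commit; the helper name sketch_map_gfn is invented for illustration, while the other identifiers follow the kernel APIs of this era.

static void sketch_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long mmu_seq;
	pfn_t pfn;

	mmu_seq = kvm->mmu_notifier_seq;	/* snapshot the notifier count */
	smp_rmb();				/* order the read against the lookup */

	pfn = gfn_to_pfn(kvm, gfn);		/* may sleep; mmu_lock not held */

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq)) {
		/* an invalidation ran in between: pfn may be stale, bail out */
		spin_unlock(&kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		return;
	}
	/* ... safe to install an spte pointing at pfn here ... */
	spin_unlock(&kvm->mmu_lock);
}

Once the pfn lookup itself happens with mmu_lock already held, as the commit message notes for the pte update path, no invalidation can slide in between the lookup and the spte update, so the mmu_seq snapshot/retry plumbing becomes dead weight and can be dropped.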
arch/x86/include/asm/kvm_host.h  +1 −1
@@ -274,7 +274,7 @@ struct kvm_mmu {
 			 struct kvm_mmu_page *sp);
 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
 	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-			u64 *spte, const void *pte, unsigned long mmu_seq);
+			   u64 *spte, const void *pte);
 	hpa_t root_hpa;
 	int root_level;
 	int shadow_root_level;
arch/x86/kvm/mmu.c  +5 −11
@@ -1206,7 +1206,7 @@ static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)

 static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
 				 struct kvm_mmu_page *sp, u64 *spte,
-				 const void *pte, unsigned long mmu_seq)
+				 const void *pte)
 {
 	WARN_ON(1);
 }
@@ -3163,9 +3163,8 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 }

 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
-				  struct kvm_mmu_page *sp,
-				  u64 *spte,
-				  const void *new, unsigned long mmu_seq)
+				  struct kvm_mmu_page *sp, u64 *spte,
+				  const void *new)
 {
 	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
 		++vcpu->kvm->stat.mmu_pde_zapped;
@@ -3173,7 +3172,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
         }

 	++vcpu->kvm->stat.mmu_pte_updated;
-	vcpu->arch.mmu.update_pte(vcpu, sp, spte, new, mmu_seq);
+	vcpu->arch.mmu.update_pte(vcpu, sp, spte, new);
 }

 static bool need_remote_flush(u64 old, u64 new)
@@ -3229,7 +3228,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
-	unsigned long mmu_seq;
 	u64 entry, gentry, *spte;
 	unsigned pte_size, page_offset, misaligned, quadrant, offset;
 	int level, npte, invlpg_counter, r, flooded = 0;
@@ -3271,9 +3269,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		break;
 	}

-	mmu_seq = vcpu->kvm->mmu_notifier_seq;
-	smp_rmb();
-
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
 		gentry = 0;
@@ -3345,8 +3340,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			if (gentry &&
 			      !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
 			      & mask.word))
-				mmu_pte_write_new_pte(vcpu, sp, spte, &gentry,
-						      mmu_seq);
+				mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
 			if (!remote_flush && need_remote_flush(entry, *spte))
 				remote_flush = true;
 			++spte;
arch/x86/kvm/paging_tmpl.h  +1 −3
@@ -325,7 +325,7 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
 }

 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-			      u64 *spte, const void *pte, unsigned long mmu_seq)
+			      u64 *spte, const void *pte)
 {
 	pt_element_t gpte;
 	unsigned pte_access;
@@ -342,8 +342,6 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		kvm_release_pfn_clean(pfn);
 		return;
 	}
-	if (mmu_notifier_retry(vcpu, mmu_seq))
-		return;

 	/*
 	 * we call mmu_set_spte() with host_writable = true because that