
Commit a052b42b authored by Xiao Guangrong, committed by Avi Kivity

KVM: MMU: move prefetch_invalid_gpte out of paging_tmpl.h



The function does not depend on the guest MMU mode, so move it out of
paging_tmpl.h and keep a single shared copy.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent d4878f24
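
Context for the move: paging_tmpl.h is a template header that KVM includes several times, once per guest paging mode, with the FNAME() macro pasting a mode prefix onto every function name. A function whose body never touches the mode-specific pt_element_t type is therefore compiled into identical copies. Below is a minimal standalone sketch of that multiple-inclusion pattern; it is a simplified stand-in, not the kernel's actual macros:

#include <stdint.h>
#include <stdio.h>

/* First "inclusion": 64-bit guest paging mode. */
#define pt_element_t uint64_t
#define FNAME(name) paging64_##name

static int FNAME(is_present)(pt_element_t gpte)
{
	return gpte & 1;	/* expands to paging64_is_present() */
}

#undef pt_element_t
#undef FNAME

/* Second "inclusion": 32-bit guest paging mode. */
#define pt_element_t uint32_t
#define FNAME(name) paging32_##name

static int FNAME(is_present)(pt_element_t gpte)
{
	return gpte & 1;	/* expands to paging32_is_present() */
}

#undef pt_element_t
#undef FNAME

int main(void)
{
	/* Two independent instantiations of the same source text. */
	printf("%d %d\n", paging64_is_present(1), paging32_is_present(0));
	return 0;
}

Because prefetch_invalid_gpte() needs none of this, the patch widens its gpte parameter from pt_element_t to u64 (which both 32-bit and 64-bit guest PTEs fit in losslessly) and moves the single copy into the common MMU code, as the diff below shows.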
arch/x86/kvm/mmu.c  +28 −8
@@ -2506,6 +2506,14 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 	mmu_free_roots(vcpu);
 }
 
+static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
+{
+	int bit7;
+
+	bit7 = (gpte >> 7) & 1;
+	return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
+}
+
 static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
 				     bool no_dirty_log)
 {
@@ -2518,6 +2526,26 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
 	return gfn_to_pfn_memslot_atomic(slot, gfn);
 }
 
+static bool prefetch_invalid_gpte(struct kvm_vcpu *vcpu,
+				  struct kvm_mmu_page *sp, u64 *spte,
+				  u64 gpte)
+{
+	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
+		goto no_present;
+
+	if (!is_present_gpte(gpte))
+		goto no_present;
+
+	if (!(gpte & PT_ACCESSED_MASK))
+		goto no_present;
+
+	return false;
+
+no_present:
+	drop_spte(vcpu->kvm, spte);
+	return true;
+}
+
 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 				    struct kvm_mmu_page *sp,
 				    u64 *start, u64 *end)
@@ -3395,14 +3423,6 @@ static void paging_free(struct kvm_vcpu *vcpu)
 	nonpaging_free(vcpu);
 }
 
-static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
-{
-	int bit7;
-
-	bit7 = (gpte >> 7) & 1;
-	return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
-}
-
 static inline void protect_clean_gpte(unsigned *access, unsigned gpte)
 {
 	unsigned mask;
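
The three early-outs in the relocated prefetch_invalid_gpte() decide when a guest PTE must not be prefetched: reserved bits set, not present, or never accessed. A standalone sketch of the same decision logic follows (a hypothetical harness, not kernel code; bit 0 as the present bit and bit 5 as the accessed bit match the x86 PTE layout, while the reserved-bit mask here is purely illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_PRESENT	(1ULL << 0)	/* x86 PTE present bit */
#define PTE_ACCESSED	(1ULL << 5)	/* x86 PTE accessed bit (PT_ACCESSED_MASK) */

/* Mirrors prefetch_invalid_gpte()'s checks: reject a gpte that sets
 * reserved bits, is not present, or has never been accessed. */
static bool gpte_invalid_for_prefetch(uint64_t gpte, uint64_t rsvd_mask)
{
	if (gpte & rsvd_mask)
		return true;
	if (!(gpte & PTE_PRESENT))
		return true;
	if (!(gpte & PTE_ACCESSED))
		return true;
	return false;
}

int main(void)
{
	uint64_t rsvd = 1ULL << 62;	/* illustrative reserved bit */

	printf("%d\n", gpte_invalid_for_prefetch(PTE_PRESENT | PTE_ACCESSED, rsvd));		/* 0 */
	printf("%d\n", gpte_invalid_for_prefetch(PTE_PRESENT, rsvd));				/* 1: accessed clear */
	printf("%d\n", gpte_invalid_for_prefetch(PTE_PRESENT | PTE_ACCESSED | rsvd, rsvd));	/* 1: reserved set */
	return 0;
}

In the kernel, the reject path additionally tears down the corresponding shadow PTE via drop_spte() before returning true; the sketch keeps only the predicate.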
arch/x86/kvm/paging_tmpl.h  +3 −23
@@ -305,26 +305,6 @@ static int FNAME(walk_addr_nested)(struct guest_walker *walker,
 					addr, access);
 }
 
-static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
-				    struct kvm_mmu_page *sp, u64 *spte,
-				    pt_element_t gpte)
-{
-	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
-		goto no_present;
-
-	if (!is_present_gpte(gpte))
-		goto no_present;
-
-	if (!(gpte & PT_ACCESSED_MASK))
-		goto no_present;
-
-	return false;
-
-no_present:
-	drop_spte(vcpu->kvm, spte);
-	return true;
-}
-
 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			      u64 *spte, const void *pte)
 {
@@ -333,7 +313,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	pfn_t pfn;
 
 	gpte = *(const pt_element_t *)pte;
-	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
+	if (prefetch_invalid_gpte(vcpu, sp, spte, gpte))
 		return;
 
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
@@ -408,7 +388,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 
 		gpte = gptep[i];
 
-		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
+		if (prefetch_invalid_gpte(vcpu, sp, spte, gpte))
 			continue;
 
 		pte_access = sp->role.access & gpte_access(vcpu, gpte);
@@ -751,7 +731,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 					  sizeof(pt_element_t)))
 			return -EINVAL;
 
-		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
+		if (prefetch_invalid_gpte(vcpu, sp, &sp->spt[i], gpte)) {
 			vcpu->kvm->tlbs_dirty++;
 			continue;
 		}