
Commit 0ad805a0 authored by Nadav Har'El, committed by Paolo Bonzini

nEPT: Move common code to paging_tmpl.h

In preparation, we move gpte_access(), prefetch_invalid_gpte(),
is_rsvd_bits_set() and protect_clean_gpte() from mmu.c to paging_tmpl.h,
and open-code the single use of is_dirty_gpte() there.
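
This works because paging_tmpl.h is a template: mmu.c includes it once per
guest paging mode, each time with a different PTTYPE, and the FNAME() macro
mangles every function name with that mode. The moved helpers thus become
per-mode copies (paging64_gpte_access(), paging32_gpte_access(), ...) that a
later EPT-specific inclusion can override. A minimal standalone sketch of the
mechanism follows; the file names, mask value and main() scaffolding are
simplified assumptions for illustration, not the kernel source:

/* template.h -- stands in for paging_tmpl.h; included repeatedly below */
#if PTTYPE == 64
#define FNAME(name) paging64_##name
#elif PTTYPE == 32
#define FNAME(name) paging32_##name
#else
#error unknown PTTYPE
#endif

/* Each inclusion emits a fresh copy under a mode-specific name. */
static int FNAME(is_dirty_gpte)(unsigned long pte)
{
	return pte & PT_DIRTY_MASK;
}

#undef FNAME

/* consumer.c -- stands in for mmu.c */
#include <stdio.h>

#define PT_DIRTY_MASK (1UL << 6)	/* x86 PTE dirty bit (bit 6) */

#define PTTYPE 64
#include "template.h"
#undef PTTYPE

#define PTTYPE 32
#include "template.h"
#undef PTTYPE

int main(void)
{
	/* Both mode-specific copies now exist and may diverge per PTTYPE. */
	printf("%d %d\n", paging64_is_dirty_gpte(1UL << 6),
	       paging32_is_dirty_gpte(0));
	return 0;
}

The real paging_tmpl.h also selects pt_element_t, gpte_to_gfn() and the
guest-walker types per PTTYPE; the point here is only the name-mangling
trick that makes moving these helpers useful for nEPT.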

Reviewed-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Nadav Har'El <nyh@il.ibm.com>
Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Xinhao Xu <xinhao.xu@intel.com>
Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b7e91450
arch/x86/kvm/mmu.c  +0 −55
@@ -331,11 +331,6 @@ static int is_large_pte(u64 pte)
	return pte & PT_PAGE_SIZE_MASK;
}

-static int is_dirty_gpte(unsigned long pte)
-{
-	return pte & PT_DIRTY_MASK;
-}
-
static int is_rmap_spte(u64 pte)
{
	return is_shadow_present_pte(pte);
@@ -2574,14 +2569,6 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
	mmu_free_roots(vcpu);
}

-static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
-{
-	int bit7;
-
-	bit7 = (gpte >> 7) & 1;
-	return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
-}
-
static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
				     bool no_dirty_log)
{
@@ -2594,26 +2581,6 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
	return gfn_to_pfn_memslot_atomic(slot, gfn);
}

-static bool prefetch_invalid_gpte(struct kvm_vcpu *vcpu,
-				  struct kvm_mmu_page *sp, u64 *spte,
-				  u64 gpte)
-{
-	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
-		goto no_present;
-
-	if (!is_present_gpte(gpte))
-		goto no_present;
-
-	if (!(gpte & PT_ACCESSED_MASK))
-		goto no_present;
-
-	return false;
-
-no_present:
-	drop_spte(vcpu->kvm, spte);
-	return true;
-}
-
static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp,
				    u64 *start, u64 *end)
@@ -3501,18 +3468,6 @@ static void paging_free(struct kvm_vcpu *vcpu)
	nonpaging_free(vcpu);
}

-static inline void protect_clean_gpte(unsigned *access, unsigned gpte)
-{
-	unsigned mask;
-
-	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
-
-	mask = (unsigned)~ACC_WRITE_MASK;
-	/* Allow write access to dirty gptes */
-	mask |= (gpte >> (PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & PT_WRITABLE_MASK;
-	*access &= mask;
-}
-
static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
			   unsigned access, int *nr_present)
{
@@ -3530,16 +3485,6 @@ static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
	return false;
}

-static inline unsigned gpte_access(struct kvm_vcpu *vcpu, u64 gpte)
-{
-	unsigned access;
-
-	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
-	access &= ~(gpte >> PT64_NX_SHIFT);
-
-	return access;
-}
-
static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte)
{
	unsigned index;
arch/x86/kvm/paging_tmpl.h  +69 −12
@@ -80,6 +80,31 @@ static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}

+static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte)
+{
+	unsigned mask;
+
+	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
+
+	mask = (unsigned)~ACC_WRITE_MASK;
+	/* Allow write access to dirty gptes */
+	mask |= (gpte >> (PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & PT_WRITABLE_MASK;
+	*access &= mask;
+}
+
+static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
+{
+	int bit7;
+
+	bit7 = (gpte >> 7) & 1;
+	return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
+}
+
+static inline int FNAME(is_present_gpte)(unsigned long pte)
+{
+	return is_present_gpte(pte);
+}
+
static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       pt_element_t __user *ptep_user, unsigned index,
			       pt_element_t orig_pte, pt_element_t new_pte)
@@ -103,6 +128,36 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
	return (ret != orig_pte);
}

+static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
+				  struct kvm_mmu_page *sp, u64 *spte,
+				  u64 gpte)
+{
+	if (FNAME(is_rsvd_bits_set)(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
+		goto no_present;
+
+	if (!FNAME(is_present_gpte)(gpte))
+		goto no_present;
+
+	if (!(gpte & PT_ACCESSED_MASK))
+		goto no_present;
+
+	return false;
+
+no_present:
+	drop_spte(vcpu->kvm, spte);
+	return true;
+}
+
+static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
+{
+	unsigned access;
+
+	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
+	access &= ~(gpte >> PT64_NX_SHIFT);
+
+	return access;
+}
+
static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
					     struct kvm_mmu *mmu,
					     struct guest_walker *walker,
@@ -123,7 +178,8 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
			trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
			pte |= PT_ACCESSED_MASK;
		}
-		if (level == walker->level && write_fault && !is_dirty_gpte(pte)) {
+		if (level == walker->level && write_fault &&
+				!(pte & PT_DIRTY_MASK)) {
			trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
			pte |= PT_DIRTY_MASK;
		}
@@ -170,7 +226,7 @@ retry_walk:
	if (walker->level == PT32E_ROOT_LEVEL) {
		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
-		if (!is_present_gpte(pte))
+		if (!FNAME(is_present_gpte)(pte))
			goto error;
		--walker->level;
	}
@@ -215,16 +271,17 @@ retry_walk:

		trace_kvm_mmu_paging_element(pte, walker->level);

-		if (unlikely(!is_present_gpte(pte)))
+		if (unlikely(!FNAME(is_present_gpte)(pte)))
			goto error;

-		if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) {
+		if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte,
+					             walker->level))) {
			errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
			goto error;
		}

		accessed_dirty &= pte;
-		pte_access = pt_access & gpte_access(vcpu, pte);
+		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;
	} while (!is_last_gpte(mmu, walker->level, pte));
@@ -247,7 +304,7 @@ retry_walk:
	walker->gfn = real_gpa >> PAGE_SHIFT;

	if (!write_fault)
-		protect_clean_gpte(&pte_access, pte);
+		FNAME(protect_clean_gpte)(&pte_access, pte);
	else
		/*
		 * On a write fault, fold the dirty bit into accessed_dirty by
@@ -308,14 +365,14 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
	gfn_t gfn;
	pfn_t pfn;

-	if (prefetch_invalid_gpte(vcpu, sp, spte, gpte))
+	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
		return false;

	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);

	gfn = gpte_to_gfn(gpte);
-	pte_access = sp->role.access & gpte_access(vcpu, gpte);
-	protect_clean_gpte(&pte_access, gpte);
+	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+	FNAME(protect_clean_gpte)(&pte_access, gpte);
	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
			no_dirty_log && (pte_access & ACC_WRITE_MASK));
	if (is_error_pfn(pfn))
@@ -784,15 +841,15 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
					  sizeof(pt_element_t)))
			return -EINVAL;

-		if (prefetch_invalid_gpte(vcpu, sp, &sp->spt[i], gpte)) {
+		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		gfn = gpte_to_gfn(gpte);
		pte_access = sp->role.access;
-		pte_access &= gpte_access(vcpu, gpte);
-		protect_clean_gpte(&pte_access, gpte);
+		pte_access &= FNAME(gpte_access)(vcpu, gpte);
+		FNAME(protect_clean_gpte)(&pte_access, gpte);

		if (sync_mmio_spte(vcpu->kvm, &sp->spt[i], gfn, pte_access,
		      &nr_present))