
Commit 4c2155ce authored by Marcelo Tosatti, committed by Avi Kivity

KVM: switch to get_user_pages_fast

Convert gfn_to_pfn to use get_user_pages_fast, which can do lockless
pagetable lookups on x86. Kernel compilation on a 4-way guest is 3.7%
faster on VMX.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 777b3f49
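
The gfn_to_pfn() conversion itself lives in virt/kvm/kvm_main.c and is not
among the hunks shown below; the hunks only drop the mmap_sem acquisitions
that the lockless lookup makes unnecessary. As a rough sketch of the
converted lookup, assuming the get_user_pages_fast(start, nr_pages, write,
pages) signature of this kernel generation and KVM's bad_page error
convention (the slow-path fallback is abbreviated here, not verbatim):

/*
 * Sketch only: the real change is in virt/kvm/kvm_main.c, which this
 * page does not show; error handling is simplified.
 */
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	/*
	 * get_user_pages_fast() walks the page tables locklessly on
	 * x86, so callers no longer need to hold mmap_sem around the
	 * lookup.
	 */
	npages = get_user_pages_fast(addr, 1, 1, page);
	if (unlikely(npages != 1)) {
		/* slow path / MMIO handling elided in this sketch */
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	return page_to_pfn(page[0]);
}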
arch/powerpc/kvm/44x_tlb.c +0 −2
@@ -147,9 +147,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
 	stlbe = &vcpu->arch.shadow_tlb[victim];
 
 	/* Get reference to new page. */
-	down_read(&current->mm->mmap_sem);
 	new_page = gfn_to_page(vcpu->kvm, gfn);
-	up_read(&current->mm->mmap_sem);
 	if (is_error_page(new_page)) {
 		printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
 		kvm_release_page_clean(new_page);
arch/x86/kvm/mmu.c +9 −14
@@ -405,16 +405,19 @@ static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
 {
 	struct vm_area_struct *vma;
 	unsigned long addr;
+	int ret = 0;
 
 	addr = gfn_to_hva(kvm, gfn);
 	if (kvm_is_error_hva(addr))
-		return 0;
+		return ret;
 
+	down_read(&current->mm->mmap_sem);
 	vma = find_vma(current->mm, addr);
 	if (vma && is_vm_hugetlb_page(vma))
-		return 1;
+		ret = 1;
+	up_read(&current->mm->mmap_sem);
 
-	return 0;
+	return ret;
 }
 
 static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
@@ -1140,9 +1143,7 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 	if (gpa == UNMAPPED_GVA)
 		return NULL;
 
-	down_read(&current->mm->mmap_sem);
 	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-	up_read(&current->mm->mmap_sem);
 
 	return page;
 }
@@ -1330,16 +1331,14 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 	pfn_t pfn;
 	unsigned long mmu_seq;
 
-	down_read(&current->mm->mmap_sem);
 	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
 		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
 		largepage = 1;
 	}
 
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
-	/* implicit mb(), we'll read before PT lock is unlocked */
+	smp_rmb();
 	pfn = gfn_to_pfn(vcpu->kvm, gfn);
-	up_read(&current->mm->mmap_sem);
 
 	/* mmio */
 	if (is_error_pfn(pfn)) {
@@ -1488,15 +1487,13 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 	if (r)
 		return r;
 
-	down_read(&current->mm->mmap_sem);
 	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
 		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
 		largepage = 1;
 	}
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
-	/* implicit mb(), we'll read before PT lock is unlocked */
+	smp_rmb();
 	pfn = gfn_to_pfn(vcpu->kvm, gfn);
-	up_read(&current->mm->mmap_sem);
 	if (is_error_pfn(pfn)) {
 		kvm_release_pfn_clean(pfn);
 		return 1;
@@ -1809,15 +1806,13 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		return;
 	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
 
-	down_read(&current->mm->mmap_sem);
 	if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
 		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
 		vcpu->arch.update_pte.largepage = 1;
 	}
 	vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
-	/* implicit mb(), we'll read before PT lock is unlocked */
+	smp_rmb();
 	pfn = gfn_to_pfn(vcpu->kvm, gfn);
-	up_read(&current->mm->mmap_sem);
 
 	if (is_error_pfn(pfn)) {
 		kvm_release_pfn_clean(pfn);
arch/x86/kvm/paging_tmpl.h +1 −7
@@ -102,14 +102,10 @@ static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
 	pt_element_t *table;
 	struct page *page;
 
-	down_read(&current->mm->mmap_sem);
 	page = gfn_to_page(kvm, table_gfn);
-	up_read(&current->mm->mmap_sem);
 
 	table = kmap_atomic(page, KM_USER0);
-
 	ret = CMPXCHG(&table[index], orig_pte, new_pte);
-
 	kunmap_atomic(table, KM_USER0);
 
 	kvm_release_page_dirty(page);
@@ -418,7 +414,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 		return 0;
 	}
 
-	down_read(&current->mm->mmap_sem);
 	if (walker.level == PT_DIRECTORY_LEVEL) {
 		gfn_t large_gfn;
 		large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
@@ -428,9 +423,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 		}
 	}
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
-	/* implicit mb(), we'll read before PT lock is unlocked */
+	smp_rmb();
 	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
-	up_read(&current->mm->mmap_sem);
 
 	/* mmio */
 	if (is_error_pfn(pfn)) {
arch/x86/kvm/vmx.c +0 −4
@@ -2010,9 +2010,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
 	if (r)
 		goto out;
 
-	down_read(&current->mm->mmap_sem);
 	kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
-	up_read(&current->mm->mmap_sem);
 out:
 	up_write(&kvm->slots_lock);
 	return r;
@@ -2034,10 +2032,8 @@ static int alloc_identity_pagetable(struct kvm *kvm)
 	if (r)
 		goto out;
 
-	down_read(&current->mm->mmap_sem);
 	kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
 			VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT);
-	up_read(&current->mm->mmap_sem);
 out:
 	up_write(&kvm->slots_lock);
 	return r;
arch/x86/kvm/x86.c +0 −6
@@ -946,10 +946,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		/* ...but clean it before doing the actual write */
 		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
 
-		down_read(&current->mm->mmap_sem);
 		vcpu->arch.time_page =
 				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-		up_read(&current->mm->mmap_sem);
 
 		if (is_error_page(vcpu->arch.time_page)) {
 			kvm_release_page_clean(vcpu->arch.time_page);
@@ -2322,9 +2320,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
 
 		val = *(u64 *)new;
 
-		down_read(&current->mm->mmap_sem);
 		page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-		up_read(&current->mm->mmap_sem);
 
 		kaddr = kmap_atomic(page, KM_USER0);
 		set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
@@ -3089,9 +3085,7 @@ static void vapic_enter(struct kvm_vcpu *vcpu)
 	if (!apic || !apic->vapic_addr)
 		return;
 
-	down_read(&current->mm->mmap_sem);
 	page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
-	up_read(&current->mm->mmap_sem);
 
 	vcpu->arch.apic->vapic_page = page;
 }
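
One subtlety recurs in the fault-path hunks above: the old "implicit mb(),
we'll read before PT lock is unlocked" comment is replaced by an explicit
smp_rmb(). With mmap_sem no longer held across gfn_to_pfn(), the
mmu_notifier_seq snapshot is what detects a racing invalidation, and the
barrier keeps that read ordered before the lockless page-table walk; the
fault path later rechecks the count under mmu_lock and retries if it moved.
A sketch of the pattern, assuming mmu_notifier_retry() as the recheck
helper of this kernel generation (the label is purely illustrative):

	mmu_seq = vcpu->kvm->mmu_notifier_seq;	/* snapshot the notifier count */
	smp_rmb();			/* order the snapshot before the lookup */
	pfn = gfn_to_pfn(vcpu->kvm, gfn);	/* get_user_pages_fast() inside */

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))	/* an invalidate ran: retry */
		goto out_unlock;		/* illustrative label */
	/* ...install the pfn into the shadow/EPT page tables... */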