
Commit 1342d353 authored by Avi Kivity, committed by Linus Torvalds

[PATCH] KVM: MMU: Load the pae pdptrs on cr3 change like the processor does



In pae mode, a load of cr3 loads the four third-level page table entries in
addition to cr3 itself.
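
For context, the patch makes KVM mimic the hardware behaviour: the four page-directory-pointer-table entries (pdptes) live in a 32-byte table that cr3 points at, and the processor snapshots them into internal registers at cr3-load time, raising #GP if a present entry has reserved bits set. Below is a minimal freestanding sketch of that semantics using the same reserved-bit mask as the patch; the struct and function names are illustrative, not kernel API.

#include <stdint.h>

/* Mask the patch applies to a present pdpte: bits 1-2, 5-8 and the
 * high physical-address bits 63:36 must be clear. */
#define PDPTE_RESERVED_MASK 0xfffffff0000001e6ull

struct vcpu_sketch {
	uint64_t pdptrs[4];	/* cached third-level entries */
};

/* Emulate a PAE cr3 load: validate and cache the four pdptes from
 * the 32-byte table that cr3 selects inside its page.  'page' stands
 * in for the mapped guest page.  Returns 1 if all entries are valid,
 * 0 if the caller should inject #GP. */
static int pae_cr3_load(struct vcpu_sketch *vcpu, const uint64_t *page,
			unsigned long cr3)
{
	unsigned offset = ((cr3 & 0xfff) >> 5) << 2; /* u64 index of the pdpt */
	int i;

	for (i = 0; i < 4; ++i) {
		uint64_t pdpte = page[offset + i];
		if ((pdpte & 1) && (pdpte & PDPTE_RESERVED_MASK))
			return 0;
	}
	for (i = 0; i < 4; ++i)
		vcpu->pdptrs[i] = page[offset + i];
	return 1;
}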

Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 6bcbd6ab
drivers/kvm/kvm.h +1 −0
@@ -185,6 +185,7 @@ struct kvm_vcpu {
 	unsigned long cr3;
 	unsigned long cr4;
 	unsigned long cr8;
+	u64 pdptrs[4]; /* pae */
 	u64 shadow_efer;
 	u64 apic_base;
 	int nmsrs;
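
The new field caches the four third-level entries in the vcpu, mirroring the processor's internal PDPTE registers. A hypothetical consumer, not part of this patch, would start a PAE page walk from the cache rather than re-reading guest memory; bits 31:30 of the guest virtual address pick one of the four entries:

/* Hypothetical helper (not in the patch): select the cached pdpte
 * covering a 32-bit guest virtual address, as the MMU would. */
static u64 cached_pdpte(const struct kvm_vcpu *vcpu, unsigned long gva)
{
	return vcpu->pdptrs[(gva >> 30) & 3];
}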
drivers/kvm/kvm_main.c +19 −10
@@ -298,14 +298,17 @@ static void inject_gp(struct kvm_vcpu *vcpu)
 	kvm_arch_ops->inject_gp(vcpu, 0);
 }
 
-static int pdptrs_have_reserved_bits_set(struct kvm_vcpu *vcpu,
-					 unsigned long cr3)
+/*
+ * Load the pae pdptrs.  Return true is they are all valid.
+ */
+static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
-	unsigned offset = (cr3 & (PAGE_SIZE-1)) >> 5;
+	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
 	int i;
 	u64 pdpte;
 	u64 *pdpt;
+	int ret;
 	struct kvm_memory_slot *memslot;
 
 	spin_lock(&vcpu->kvm->lock);
@@ -313,16 +316,23 @@ static int pdptrs_have_reserved_bits_set(struct kvm_vcpu *vcpu,
 	/* FIXME: !memslot - emulate? 0xff? */
 	pdpt = kmap_atomic(gfn_to_page(memslot, pdpt_gfn), KM_USER0);
 
+	ret = 1;
 	for (i = 0; i < 4; ++i) {
 		pdpte = pdpt[offset + i];
-		if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull))
-			break;
+		if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull)) {
+			ret = 0;
+			goto out;
+		}
 	}
 
+	for (i = 0; i < 4; ++i)
+		vcpu->pdptrs[i] = pdpt[offset + i];
+
+out:
 	kunmap_atomic(pdpt, KM_USER0);
 	spin_unlock(&vcpu->kvm->lock);
 
-	return i != 4;
+	return ret;
 }
 
 void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
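
Note the corrected offset arithmetic in load_pdptrs(): cr3 bits 11:5 select one of the 32-byte pdpt tables within a page, and each table holds four 8-byte entries, so the table number must be scaled by four to become a u64 array index. The old code effectively indexed by table number, reading the wrong entries for any pdpt not at the start of its page. A small worked check (the cr3 value is illustrative):

#include <assert.h>

int main(void)
{
	unsigned long cr3 = 0x1234020;	/* pdpt at byte 0x20 of its page */
	unsigned old_idx = (cr3 & 0xfff) >> 5;		/* 1: off by a factor of 8 in bytes */
	unsigned new_idx = ((cr3 & 0xfff) >> 5) << 2;	/* 4: u64 index 4 is byte 4*8 = 0x20 */

	assert(old_idx == 1 && new_idx == 4);
	return 0;
}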
@@ -368,8 +378,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 			}
 		} else
 #endif
-		if (is_pae(vcpu) &&
-			    pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) {
+		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
 			       "reserved bits\n");
 			inject_gp(vcpu);
@@ -411,7 +420,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 			return;
 		}
 	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK)
-		   && pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) {
+		   && !load_pdptrs(vcpu, vcpu->cr3)) {
 		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
 		inject_gp(vcpu);
 	}
@@ -443,7 +452,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 			return;
 		}
 		if (is_paging(vcpu) && is_pae(vcpu) &&
-		    pdptrs_have_reserved_bits_set(vcpu, cr3)) {
+		    !load_pdptrs(vcpu, cr3)) {
 			printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
 			       "reserved bits\n");
 			inject_gp(vcpu);
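
All three control-register setters now share the same guard, so the shape of each call site after the patch is (a condensed restatement with a placeholder name, not a line from the diff):

	if (is_pae(vcpu) && !load_pdptrs(vcpu, cr3)) {
		printk(KERN_DEBUG "set_crN: #GP, pdptrs reserved bits\n");
		inject_gp(vcpu);
		return;
	}

Since load_pdptrs() returns true on success, the negation preserves the old #GP-on-reserved-bits behaviour while also refreshing the cached pdptrs on every successful cr3 write (or mode-changing cr0/cr4 write), which is what the processor does.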