Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d55e2cb2 authored by Avi Kivity
Browse files

KVM: MMU: Store nx bit for large page shadows



We need to distinguish between large page shadows which have the nx bit set
and those which don't.  The problem shows up when booting a newer smp Linux
kernel, where the trampoline page (which is in real mode, which uses the
same shadow pages as large pages) is using the same mapping as a kernel data
page, which is mapped using nx, causing kvm to spin on that page.

Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 2cb7e714
Loading
Loading
Loading
Loading
+2 −2
Original line number Original line Diff line number Diff line
@@ -121,7 +121,7 @@ struct kvm_pte_chain {
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 *   bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 *   bits 17:18 - "access" - the user and writable bits of a huge page pde
 *   bits 17:19 - "access" - the user, writable, and nx bits of a huge page pde
 */
 */
union kvm_mmu_page_role {
union kvm_mmu_page_role {
	unsigned word;
	unsigned word;
@@ -131,7 +131,7 @@ union kvm_mmu_page_role {
		unsigned quadrant : 2;
		unsigned quadrant : 2;
		unsigned pad_for_nice_hex_output : 6;
		unsigned pad_for_nice_hex_output : 6;
		unsigned metaphysical : 1;
		unsigned metaphysical : 1;
		unsigned hugepage_access : 2;
		unsigned hugepage_access : 3;
	};
	};
};
};


+2 −0
Original line number Original line Diff line number Diff line
@@ -366,6 +366,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			metaphysical = 1;
			metaphysical = 1;
			hugepage_access = *guest_ent;
			hugepage_access = *guest_ent;
			hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
			hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
			if (*guest_ent & PT64_NX_MASK)
				hugepage_access |= (1 << 2);
			hugepage_access >>= PT_WRITABLE_SHIFT;
			hugepage_access >>= PT_WRITABLE_SHIFT;
			table_gfn = (*guest_ent & PT_BASE_ADDR_MASK)
			table_gfn = (*guest_ent & PT_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
				>> PAGE_SHIFT;