
Commit fd8cb433 authored by Yu Zhang, committed by Paolo Bonzini

KVM: MMU: Expose the LA57 feature to VM.



This patch exposes the 5-level page table feature (LA57) to the VM.
At the same time, canonical virtual address checking is extended
to support both 48-bit and 57-bit address widths.

Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 855feb67
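
Background on the check this commit generalizes: an address is canonical when bits 63 down to (width - 1) are all copies of bit (width - 1), i.e. when sign-extending from the top implemented bit leaves the value unchanged. A minimal standalone C sketch (illustrative only, not part of this commit; it mirrors the kernel's shift-based approach) for both widths:

    #include <stdint.h>
    #include <stdio.h>

    /* Sign-extend from bit (vaddr_bits - 1); a canonical address is unchanged. */
    static uint64_t canonical(uint64_t la, unsigned int vaddr_bits)
    {
        return (uint64_t)(((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits));
    }

    int main(void)
    {
        /* Bit 47 set, upper bits clear: non-canonical at 48 bits (bits 63:48
         * must copy bit 47), but canonical at 57 bits (bit 56 is the sign bit). */
        uint64_t la = 0x0000800000000000ULL;

        printf("canonical at 48 bits? %d\n", canonical(la, 48) == la); /* 0 */
        printf("canonical at 57 bits? %d\n", canonical(la, 57) == la); /* 1 */
        return 0;
    }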
+2 −16
@@ -85,8 +85,8 @@
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
-			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP \
-			  | X86_CR4_PKE))
+			  | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
+			  | X86_CR4_SMAP | X86_CR4_PKE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

@@ -1300,20 +1300,6 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

-static inline u64 get_canonical(u64 la)
-{
-	return ((int64_t)la << 16) >> 16;
-}
-
-static inline bool is_noncanonical_address(u64 la)
-{
-#ifdef CONFIG_X86_64
-	return get_canonical(la) != la;
-#else
-	return false;
-#endif
-}

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
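
The helpers removed above hard-code the 48-bit case: ((int64_t)la << 16) >> 16 sign-extends from bit 47. Their replacements are added in a part of this commit that did not load on this page, so the following is only a sketch of the shape the call sites below imply; the helper bodies and the CR4.LA57-based width selection are assumptions, not a verbatim copy:

    /* Assumed shape of the replacement helpers (kernel context). */
    static inline u64 get_canonical(u64 la, u8 vaddr_bits)
    {
        return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
    }

    /* Assumption: the guest uses 57-bit linear addresses iff CR4.LA57 is set. */
    static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
    {
        return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
    }

    static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
    {
    #ifdef CONFIG_X86_64
        return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
    #else
        return false;
    #endif
    }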
+10 −6
@@ -126,13 +126,16 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

	/*
-	 * The existing code assumes virtual address is 48-bit in the canonical
-	 * address checks; exit if it is ever changed.
+	 * The existing code assumes virtual address is 48-bit or 57-bit in the
+	 * canonical address checks; exit if it is ever changed.
	 */
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
-	if (best && ((best->eax & 0xff00) >> 8) != 48 &&
-		((best->eax & 0xff00) >> 8) != 0)
-		return -EINVAL;
+	if (best) {
+		int vaddr_bits = (best->eax & 0xff00) >> 8;
+
+		if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
+			return -EINVAL;
+	}

	/* Update physical-address width */
	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
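
The 0xff00 mask and shift extract CPUID.80000008H:EAX[15:8], the linear-address width; EAX[7:0] is the physical-address width consumed just below for maxphyaddr. A small host-side decode of the same leaf (standalone sketch using the GCC/Clang cpuid.h helper):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
            return 1;
        printf("physical-address bits: %u\n", eax & 0xff);        /* EAX[7:0]  */
        printf("linear-address bits:   %u\n", (eax >> 8) & 0xff); /* EAX[15:8] */
        return 0;
    }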
@@ -384,7 +387,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,

	/* cpuid 7.0.ecx*/
	const u32 kvm_cpuid_7_0_ecx_x86_features =
-		F(AVX512VBMI) | F(PKU) | 0 /*OSPKE*/ | F(AVX512_VPOPCNTDQ);
+		F(AVX512VBMI) | F(LA57) | F(PKU) |
+		0 /*OSPKE*/ | F(AVX512_VPOPCNTDQ);

	/* cpuid 7.0.edx*/
	const u32 kvm_cpuid_7_0_edx_x86_features =
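
Adding F(LA57) to the 7.0.ECX mask lets KVM pass the host's LA57 capability through to the guest; LA57 is reported in CPUID.(EAX=7,ECX=0):ECX bit 16. A guest can probe it with a few lines of userspace C (standalone sketch, same cpuid.h helper as above):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* Leaf 7, sub-leaf 0: LA57 is reported in ECX bit 16. */
        if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
            return 1;
        printf("LA57: %s\n", (ecx & (1u << 16)) ? "yes" : "no");
        return 0;
    }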
+9 −7
@@ -689,16 +689,18 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
	ulong la;
	u32 lim;
	u16 sel;
+	u8  va_bits;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		*linear = la;
-		if (is_noncanonical_address(la))
+		va_bits = ctxt_virt_addr_bits(ctxt);
+		if (get_canonical(la, va_bits) != la)
			goto bad;

-		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
+		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
		if (size > *max_size)
			goto bad;
		break;
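
__linearize now asks the emulator context for the address width instead of assuming 48 bits, and the following emulator hunks switch to emul_is_noncanonical_address(). Both helpers are introduced by this commit in hunks that did not load here; the names are confirmed by the call sites, but the bodies below are an assumption about their shape:

    /* Assumed body: emulator-side width selection mirrors the vCPU one. */
    static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
    {
        return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
    }

    /* Assumed body: canonical check against the emulator context's width. */
    static inline bool emul_is_noncanonical_address(u64 la,
                                                    struct x86_emulate_ctxt *ctxt)
    {
        return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
    }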
@@ -1749,8 +1751,8 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
-		if (is_noncanonical_address(get_desc_base(&seg_desc) |
-					     ((u64)base3 << 32)))
+		if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
+				((u64)base3 << 32), ctxt))
			return emulate_gp(ctxt, 0);
	}
load:
@@ -2841,8 +2843,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
-		if (is_noncanonical_address(rcx) ||
-		    is_noncanonical_address(rdx))
+		if (emul_is_noncanonical_address(rcx, ctxt) ||
+		    emul_is_noncanonical_address(rdx, ctxt))
			return emulate_gp(ctxt, 0);
		break;
	}
@@ -3757,7 +3759,7 @@ static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
-	    is_noncanonical_address(desc_ptr.address))
+	    emul_is_noncanonical_address(desc_ptr.address, ctxt))
		return emulate_gp(ctxt, 0);
	if (lgdt)
		ctxt->ops->set_gdt(ctxt, &desc_ptr);
+1 −1
@@ -4,7 +4,7 @@
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
-	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE)
+	 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
					      enum kvm_reg reg)
+4 −4
@@ -122,7 +122,7 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
#define KVM_CR4_GUEST_OWNED_BITS				      \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
-	 | X86_CR4_OSXMMEXCPT | X86_CR4_TSD)
+	 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)

#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
@@ -3374,7 +3374,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
		    (!msr_info->host_initiated &&
		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
			return 1;
-		if (is_noncanonical_address(data & PAGE_MASK) ||
+		if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
		    (data & MSR_IA32_BNDCFGS_RSVD))
			return 1;
		vmcs_write64(GUEST_BNDCFGS, data);
@@ -7143,7 +7143,7 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
		 * non-canonical form. This is the only check on the memory
		 * destination for long mode!
		 */
-		exn = is_noncanonical_address(*ret);
+		exn = is_noncanonical_address(*ret, vcpu);
	} else if (is_protmode(vcpu)) {
		/* Protected mode: apply checks for segment validity in the
		 * following order:
@@ -7948,7 +7948,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)

	switch (type) {
	case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
-		if (is_noncanonical_address(operand.gla)) {
+		if (is_noncanonical_address(operand.gla, vcpu)) {
			nested_vmx_failValid(vcpu,
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
			return kvm_skip_emulated_instruction(vcpu);