
Commit cbf71279 authored by Radim Krčmář, committed by Paolo Bonzini

KVM: nVMX: fix nested_vmx_check_vmptr failure paths under debugging

kvm_skip_emulated_instruction() will return 0 if userspace is
single-stepping the guest.

kvm_skip_emulated_instruction() uses the return convention of exit
handlers: 0 means "exit to userspace" and 1 means "continue vm entries".
The problem is that nested_vmx_check_vmptr()'s return status means
something else: 0 is ok, 1 is error.

This means we would continue executing after a failure.  A static
checker noticed it because vmptr was not initialized.
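
For illustration, a minimal standalone sketch of the clash (toy stubs
marked _stub are hypothetical, not the kernel code):

#include <stdio.h>

/* Exit-handler convention: 0 = "exit to userspace", 1 = "continue vm
 * entries"; the skip helper returns 0 when userspace single-steps. */
static int kvm_skip_emulated_instruction_stub(int single_stepping)
{
	return single_stepping ? 0 : 1;
}

/* Old check convention: 0 = ok, 1 = error.  The failure path leaks
 * the exit-handler status into the check's return value. */
static int check_vmptr_stub(int bad_ptr, int single_stepping)
{
	if (bad_ptr)
		return kvm_skip_emulated_instruction_stub(single_stepping);
	return 0;
}

int main(void)
{
	/* Bad pointer while single-stepping: the check returns 0, which
	 * the caller reads as "ok", so execution continues after the
	 * failure.  Prints 0. */
	printf("%d\n", check_vmptr_stub(1, 1));
	return 0;
}

The fix below avoids the clash: nested_vmx_get_vmptr() only reads the
pointer (0 = ok, 1 = error), and the kvm_skip_emulated_instruction()
calls move into the handlers, whose return values already use the
exit-handler convention.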

Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Fixes: 6affcbed ("KVM: x86: Add kvm_skip_emulated_instruction and use it.")
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 52b54190
+57 −83
@@ -6914,97 +6914,21 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-/*
- * This function performs the various checks including
- * - if it's 4KB aligned
- * - No bits beyond the physical address width are set
- * - Returns 0 on success or else 1
- * (Intel SDM Section 30.3)
- */
-static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
-				  gpa_t *vmpointer)
+static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
 {
 	gva_t gva;
-	gpa_t vmptr;
 	struct x86_exception e;
-	struct page *page;
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	int maxphyaddr = cpuid_maxphyaddr(vcpu);
 
 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
 			vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
 		return 1;
 
-	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
-				sizeof(vmptr), &e)) {
+	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
+				sizeof(*vmpointer), &e)) {
 		kvm_inject_page_fault(vcpu, &e);
 		return 1;
 	}
 
-	switch (exit_reason) {
-	case EXIT_REASON_VMON:
-		/*
-		 * SDM 3: 24.11.5
-		 * The first 4 bytes of VMXON region contain the supported
-		 * VMCS revision identifier
-		 *
-		 * Note - IA32_VMX_BASIC[48] will never be 1
-		 * for the nested case;
-		 * which replaces physical address width with 32
-		 *
-		 */
-		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-			nested_vmx_failInvalid(vcpu);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-
-		page = nested_get_page(vcpu, vmptr);
-		if (page == NULL) {
-			nested_vmx_failInvalid(vcpu);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		if (*(u32 *)kmap(page) != VMCS12_REVISION) {
-			kunmap(page);
-			nested_release_page_clean(page);
-			nested_vmx_failInvalid(vcpu);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		kunmap(page);
-		nested_release_page_clean(page);
-		vmx->nested.vmxon_ptr = vmptr;
-		break;
-	case EXIT_REASON_VMCLEAR:
-		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMCLEAR_INVALID_ADDRESS);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-
-		if (vmptr == vmx->nested.vmxon_ptr) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMCLEAR_VMXON_POINTER);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		break;
-	case EXIT_REASON_VMPTRLD:
-		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMPTRLD_INVALID_ADDRESS);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-
-		if (vmptr == vmx->nested.vmxon_ptr) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMPTRLD_VMXON_POINTER);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		break;
-	default:
-		return 1; /* shouldn't happen */
-	}
-
-	if (vmpointer)
-		*vmpointer = vmptr;
 	return 0;
 }

@@ -7066,6 +6990,8 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
 static int handle_vmon(struct kvm_vcpu *vcpu)
 {
 	int ret;
+	gpa_t vmptr;
+	struct page *page;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
 		| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
@@ -7095,9 +7021,37 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 		return 1;
 	}
 
-	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
+	if (nested_vmx_get_vmptr(vcpu, &vmptr))
 		return 1;
 
+	/*
+	 * SDM 3: 24.11.5
+	 * The first 4 bytes of VMXON region contain the supported
+	 * VMCS revision identifier
+	 *
+	 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
+	 * which replaces physical address width with 32
+	 */
+	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+		nested_vmx_failInvalid(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
+	page = nested_get_page(vcpu, vmptr);
+	if (page == NULL) {
+		nested_vmx_failInvalid(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+	if (*(u32 *)kmap(page) != VMCS12_REVISION) {
+		kunmap(page);
+		nested_release_page_clean(page);
+		nested_vmx_failInvalid(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+	kunmap(page);
+	nested_release_page_clean(page);
+
+	vmx->nested.vmxon_ptr = vmptr;
 	ret = enter_vmx_operation(vcpu);
 	if (ret)
 		return ret;
@@ -7213,9 +7167,19 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
+	if (nested_vmx_get_vmptr(vcpu, &vmptr))
 		return 1;
 
+	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+		nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
+	if (vmptr == vmx->nested.vmxon_ptr) {
+		nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
 	if (vmptr == vmx->nested.current_vmptr)
 		nested_release_vmcs12(vmx);

@@ -7545,9 +7509,19 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
+	if (nested_vmx_get_vmptr(vcpu, &vmptr))
 		return 1;
 
+	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+		nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
+	if (vmptr == vmx->nested.vmxon_ptr) {
+		nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
 	if (vmx->nested.current_vmptr != vmptr) {
 		struct vmcs12 *new_vmcs12;
 		struct page *page;