Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 64723cd3 authored by Uros Bizjak, committed by Greg Kroah-Hartman
Browse files

KVM/nVMX: Use __vmx_vcpu_run in nested_vmx_check_vmentry_hw



commit 150f17bfab37e981ba03b37440638138ff2aa9ec upstream.

Replace inline assembly in nested_vmx_check_vmentry_hw
with a call to __vmx_vcpu_run.  The function is not
performance critical, so (double) GPR save/restore
in __vmx_vcpu_run can be tolerated, as far as performance
effects are concerned.

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Reviewed-and-tested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
[sean: dropped versioning info from changelog]
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20201231002702.2223707-5-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
[cascardo: small fixups]
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 57ba312f
Loading
Loading
Loading
Loading
+3 −29
Original line number Diff line number Diff line
@@ -11,6 +11,7 @@
#include "mmu.h"
#include "nested.h"
#include "trace.h"
#include "vmx.h"
#include "x86.h"

static bool __read_mostly enable_shadow_vmcs = 1;
@@ -2863,35 +2864,8 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
		vmx->loaded_vmcs->host_state.cr4 = cr4;
	}

	asm(
		"sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
		"cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
		"je 1f \n\t"
		__ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
		"mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
		"1: \n\t"
		"add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */

		/* Check if vmlaunch or vmresume is needed */
		"cmpb $0, %c[launched](%[loaded_vmcs])\n\t"

		/*
		 * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
		 * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
		 * Valid.  vmx_vmenter() directly "returns" RFLAGS, and so the
		 * results of VM-Enter is captured via CC_{SET,OUT} to vm_fail.
		 */
		"call vmx_vmenter\n\t"

		CC_SET(be)
	      : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
	      :	[HOST_RSP]"r"((unsigned long)HOST_RSP),
		[loaded_vmcs]"r"(vmx->loaded_vmcs),
		[launched]"i"(offsetof(struct loaded_vmcs, launched)),
		[host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
		[wordsize]"i"(sizeof(ulong))
	      : "memory"
	);
	vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
				 vmx->loaded_vmcs->launched);

	if (vmx->msr_autoload.host.nr)
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+0 −2
Original line number Diff line number Diff line
@@ -6540,8 +6540,6 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
	}
}

bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);

static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
+1 −0
Original line number Diff line number Diff line
@@ -336,6 +336,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);

#define POSTED_INTR_ON  0
#define POSTED_INTR_SN  1