Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0d62a56d authored by Paolo Bonzini, committed by Greg Kroah-Hartman
Browse files

KVM/x86: Remove indirect MSR op calls from SPEC_CTRL



commit ecb586bd29c99fb4de599dec388658e74388daad upstream.

Having a paravirt indirect call in the IBRS restore path is not a
good idea, since we are trying to protect from speculative execution
of bogus indirect branch targets.  It is also slower, so use
native_wrmsrl() on the vmentry path too.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: KarimAllah Ahmed <karahmed@amazon.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm@vger.kernel.org
Cc: stable@vger.kernel.org
Fixes: d28b387fb74da95d69d2615732f50cceb38e9a4d
Link: http://lkml.kernel.org/r/20180222154318.20361-2-pbonzini@redhat.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 7135aaf3
Loading
Loading
Loading
Loading
+4 −3
Original line number Diff line number Diff line
@@ -45,6 +45,7 @@
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/microcode.h>
#include <asm/nospec-branch.h>

#include <asm/virtext.h>
@@ -5015,7 +5016,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
	 * being speculatively taken.
	 */
	if (svm->spec_ctrl)
		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
		native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);

	asm volatile (
		"push %%" _ASM_BP "; \n\t"
@@ -5125,10 +5126,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
	 * save it.
	 */
	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

	if (svm->spec_ctrl)
		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);

	/* Eliminate branch target predictions from guest mode */
	vmexit_fill_RSB();
+4 −3
Original line number Diff line number Diff line
@@ -51,6 +51,7 @@
#include <asm/apic.h>
#include <asm/irq_remapping.h>
#include <asm/mmu_context.h>
#include <asm/microcode.h>
#include <asm/nospec-branch.h>

#include "trace.h"
@@ -9431,7 +9432,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
	 * being speculatively taken.
	 */
	if (vmx->spec_ctrl)
		wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
		native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);

	vmx->__launched = vmx->loaded_vmcs->launched;
	asm(
@@ -9567,10 +9568,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
	 * save it.
	 */
	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

	if (vmx->spec_ctrl)
		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);

	/* Eliminate branch target predictions from guest mode */
	vmexit_fill_RSB();