
Commit e6ac9561 authored by Josh Poimboeuf, committed by Greg Kroah-Hartman

KVM: VMX: Prevent guest RSB poisoning attacks with eIBRS



commit fc02735b14fff8c6678b521d324ade27b1a3d4cf upstream.

On eIBRS systems, the returns in the vmexit return path from
__vmx_vcpu_run() to vmx_vcpu_run() are exposed to RSB poisoning attacks.

Fix that by moving the post-vmexit spec_ctrl handling to immediately
after the vmexit.

Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
[ bp: Adjust for the fact that vmexit is in inline assembly ]
Signed-off-by: Suraj Jitindar Singh <surajjs@amazon.com>
Signed-off-by: Suleiman Souhlal <suleiman@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 24344e2b
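
The hazard being fixed: between the hardware vmexit and the first RET executed on the host side, the RSB may still hold guest-controlled return targets, and with eIBRS the protection only applies once the host's SPEC_CTRL value is back in the MSR. Below is a minimal, userspace-compilable sketch of the ordering the patch enforces. It is illustrative only: the vmexit, RSB fill, and MSR accesses are stubbed out, and the names merely mirror the kernel's.

#include <stdint.h>
#include <stdio.h>

static uint64_t msr_spec_ctrl;               /* stand-in for MSR_IA32_SPEC_CTRL */
static uint64_t host_spec_ctrl = 1;          /* stand-in for x86_spec_ctrl_current */

static void stub_vmexit(void)
{
	/* Placeholder for the inline-asm VMLAUNCH/VMRESUME exit: the guest
	 * may have left any value in SPEC_CTRL and any targets in the RSB. */
	msr_spec_ctrl = 0;
}

static void stub_vmexit_fill_RSB(void)
{
	/* Placeholder: the real sequence overwrites all RSB entries. */
}

static uint64_t stub_spec_ctrl_restore_host(void)
{
	uint64_t guestval = msr_spec_ctrl;       /* __rdmsr() in the kernel */

	if (guestval != host_spec_ctrl)
		msr_spec_ctrl = host_spec_ctrl;  /* native_wrmsrl() in the kernel */
	return guestval;                         /* saved instead of a second RDMSR */
}

static void vcpu_run_after_fix(void)
{
	uint64_t spec_ctrl;

	stub_vmexit();
	/* The patch's invariant: both of these run before the first
	 * unbalanced RET (the kernel inlines them for that reason). */
	stub_vmexit_fill_RSB();
	spec_ctrl = stub_spec_ctrl_restore_host();

	printf("guest SPEC_CTRL was %#llx\n", (unsigned long long)spec_ctrl);
}

int main(void)
{
	vcpu_run_after_fix();
	return 0;
}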
arch/x86/include/asm/nospec-branch.h +2 −1
@@ -271,7 +271,7 @@ extern char __indirect_thunk_end[];
  * retpoline and IBRS mitigations for Spectre v2 need this; only on future
  * CPUs with IBRS_ALL *might* it be avoided.
  */
-static inline void vmexit_fill_RSB(void)
+static __always_inline void vmexit_fill_RSB(void)
 {
 #ifdef CONFIG_RETPOLINE
 	unsigned long loops;
@@ -306,6 +306,7 @@ static inline void indirect_branch_prediction_barrier(void)
 
 /* The Intel SPEC CTRL MSR base value cache */
 extern u64 x86_spec_ctrl_base;
+extern u64 x86_spec_ctrl_current;
 extern void write_spec_ctrl_current(u64 val, bool force);
 extern u64 spec_ctrl_current(void);
arch/x86/kernel/cpu/bugs.c +4 −0
@@ -185,6 +185,10 @@ void __init check_bugs(void)
 #endif
 }
 
+/*
+ * NOTE: For VMX, this function is not called in the vmexit path.
+ * It uses vmx_spec_ctrl_restore_host() instead.
+ */
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
arch/x86/kvm/vmx.c +40 −6
@@ -10760,10 +10760,31 @@ static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
 	vmx->loaded_vmcs->hv_timer_armed = false;
 }
 
+u64 __always_inline vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx)
+{
+	u64 guestval, hostval = this_cpu_read(x86_spec_ctrl_current);
+
+	if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL))
+		return 0;
+
+	guestval = __rdmsr(MSR_IA32_SPEC_CTRL);
+
+	/*
+	 * If the guest/host SPEC_CTRL values differ, restore the host value.
+	 */
+	if (guestval != hostval)
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval);
+
+	barrier_nospec();
+
+	return guestval;
+}
+
 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long cr3, cr4, evmcs_rsp;
+	u64 spec_ctrl;
 
 	/* Record the guest's net vcpu time for enforced NMI injections. */
 	if (unlikely(!enable_vnmi &&
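
A note on the helper added in the hunk above (my reading, not part of the commit message): it is declared u64 __always_inline and uses the raw __rdmsr()/native_wrmsrl() accessors because an out-of-line call, or a traced MSR wrapper, would execute a CALL/RET pair inside the very window being protected. A hedged, compilable sketch of that distinction, using GCC/Clang attributes on stub functions:

#include <stdint.h>

/* Expanded at the call site: no CALL/RET is emitted, so no RET can be
 * steered by guest-poisoned RSB entries before the restore completes. */
static inline __attribute__((always_inline)) uint64_t
restore_sketch_inline(uint64_t guestval, uint64_t hostval, uint64_t *msr)
{
	if (guestval != hostval)
		*msr = hostval;
	return guestval;
}

/* Out of line: the RET at the end of this function would execute while
 * the guest's SPEC_CTRL value is still live -- exactly the hazard. */
static __attribute__((noinline)) uint64_t
restore_sketch_call(uint64_t guestval, uint64_t hostval, uint64_t *msr)
{
	if (guestval != hostval)
		*msr = hostval;
	return guestval;
}

int main(void)
{
	uint64_t msr = 0;
	uint64_t a = restore_sketch_inline(0, 1, &msr);
	uint64_t b = restore_sketch_call(msr, 1, &msr);
	return (int)(a + b);
}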
@@ -10989,6 +11010,24 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 	      );
 
+	/*
+	 * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
+	 * the first unbalanced RET after vmexit!
+	 *
+	 * For retpoline, RSB filling is needed to prevent poisoned RSB entries
+	 * and (in some cases) RSB underflow.
+	 *
+	 * eIBRS has its own protection against poisoned RSB, so it doesn't
+	 * need the RSB filling sequence.  But it does need to be enabled
+	 * before the first unbalanced RET.
+	 *
+	 * So no RETs before vmx_spec_ctrl_restore_host() below.
+	 */
+	vmexit_fill_RSB();
+
+	/* Save this for below */
+	spec_ctrl = vmx_spec_ctrl_restore_host(vmx);
+
 	vmx_enable_fb_clear(vmx);
 
 	/*
@@ -11007,12 +11046,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * save it.
 	 */
 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
-		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
-
-	x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
-
-	/* Eliminate branch target predictions from guest mode */
-	vmexit_fill_RSB();
+		vmx->spec_ctrl = spec_ctrl;
 
 	/* All fields are clean at this point */
 	if (static_branch_unlikely(&enable_evmcs))