
Commit df5d4ea2 authored by Sean Christopherson, committed by Greg Kroah-Hartman

KVM: x86: Always use 32-bit SMRAM save state for 32-bit kernels



[ Upstream commit b68f3cc7d978943fcf85148165b00594c38db776 ]

Invoking the 64-bit variant on a 32-bit kernel will crash the guest,
trigger a WARN, and/or lead to a buffer overrun in the host, e.g.
rsm_load_state_64() writes r8-r15 unconditionally, but enum kvm_reg and
thus x86_emulate_ctxt._regs only define r8-r15 for CONFIG_X86_64.
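
For reference, the register file the emulator writes through is sized by
enum kvm_reg. Abridged below from arch/x86/include/asm/kvm_host.h of this
era (exact spelling may vary slightly by kernel version), it only defines
the r8-r15 indices under CONFIG_X86_64:

/* Abridged sketch of enum kvm_reg; the r8-r15 indices exist only on
 * 64-bit builds, and x86_emulate_ctxt._regs is sized by NR_VCPU_REGS. */
enum kvm_reg {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS
};

On a 32-bit build NR_VCPU_REGS stops at 9, so unconditional r8-r15 writes in
rsm_load_state_64() land past the end of the _regs array, hence the buffer
overrun noted above.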

KVM allows userspace to report long mode support via CPUID, even though
the guest is all but guaranteed to crash if it actually tries to enable
long mode.  But a pure 32-bit guest that is ignorant of long mode will
happily plod along.
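
For illustration only (not part of this patch), userspace advertises long
mode by setting the LM bit (CPUID.80000001H:EDX bit 29) through the
KVM_SET_CPUID2 ioctl. A minimal, hypothetical sketch of that step:

#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

/* Hypothetical helper: report long mode support for one vCPU.  A real VMM
 * fills in the whole CPUID table (typically derived from
 * KVM_GET_SUPPORTED_CPUID); this sets only the single leaf of interest. */
static int advertise_long_mode(int vcpu_fd)
{
	struct kvm_cpuid2 *cpuid;
	int ret;

	cpuid = calloc(1, sizeof(*cpuid) + sizeof(struct kvm_cpuid_entry2));
	if (!cpuid)
		return -1;

	cpuid->nent = 1;
	cpuid->entries[0].function = 0x80000001;
	cpuid->entries[0].edx = 1u << 29;	/* CPUID.80000001H:EDX.LM */

	ret = ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);
	free(cpuid);
	return ret;
}

Whether the guest can actually use long mode is a separate question; on a
32-bit host kernel it cannot, which is the combination discussed below.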

SMM complicates things as 64-bit CPUs use a different SMRAM save state
area.  KVM handles this correctly for 64-bit kernels, e.g. uses the
legacy save state map if userspace has hidden long mode from the guest,
but doesn't fare well when userspace reports long mode support on a
32-bit host kernel (32-bit KVM doesn't support 64-bit guests).
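
Concretely, the pre-patch RSM dispatch (visible as context in the emulate.c
hunk below) keys purely off that CPUID bit, so 32-bit KVM with long mode
reported by userspace walks straight into the 64-bit restore path:

	/* Pre-patch dispatch, abridged from em_rsm(): the SMRAM layout is
	 * chosen solely by the guest's long-mode CPUID bit. */
	if (emulator_has_longmode(ctxt))
		ret = rsm_load_state_64(ctxt, smbase + 0x8000);
	else
		ret = rsm_load_state_32(ctxt, smbase + 0x8000);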

Since the alternative is to crash the guest, e.g. by not loading state
or explicitly requesting shutdown, unconditionally use the legacy SMRAM
save state map for 32-bit KVM.  If a guest has managed to get far enough
to handle SMIs when running under a weird/buggy userspace hypervisor,
then don't deliberately crash the guest since there are no downsides
(from KVM's perspective) to letting it continue running.

Fixes: 660a5d51 ("KVM: x86: save/load state on SMM switch")
Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent 7a74d806
arch/x86/kvm/emulate.c  +10 −0
@@ -2331,12 +2331,16 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt)
 
 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
 {
+#ifdef CONFIG_X86_64
 	u32 eax, ebx, ecx, edx;
 
 	eax = 0x80000001;
 	ecx = 0;
 	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
 	return edx & bit(X86_FEATURE_LM);
+#else
+	return false;
+#endif
 }
 
 #define GET_SMSTATE(type, smbase, offset)				  \
@@ -2381,6 +2385,7 @@ static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 	return X86EMUL_CONTINUE;
 }
 
+#ifdef CONFIG_X86_64
 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 {
 	struct desc_struct desc;
@@ -2399,6 +2404,7 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
 	return X86EMUL_CONTINUE;
 }
+#endif
 
 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
 				    u64 cr0, u64 cr3, u64 cr4)
@@ -2499,6 +2505,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 }
 
+#ifdef CONFIG_X86_64
 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 {
 	struct desc_struct desc;
@@ -2560,6 +2567,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 
 	return X86EMUL_CONTINUE;
 }
+#endif
 
 static int em_rsm(struct x86_emulate_ctxt *ctxt)
 {
@@ -2616,9 +2624,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 	if (ctxt->ops->pre_leave_smm(ctxt, smbase))
 		return X86EMUL_UNHANDLEABLE;
 
+#ifdef CONFIG_X86_64
 	if (emulator_has_longmode(ctxt))
 		ret = rsm_load_state_64(ctxt, smbase + 0x8000);
 	else
+#endif
 		ret = rsm_load_state_32(ctxt, smbase + 0x8000);
 
 	if (ret != X86EMUL_CONTINUE) {
arch/x86/kvm/x86.c  +6 −4
@@ -7227,9 +7227,9 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
 	put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
 }
 
+#ifdef CONFIG_X86_64
 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 {
-#ifdef CONFIG_X86_64
 	struct desc_ptr dt;
 	struct kvm_segment seg;
 	unsigned long val;
@@ -7279,10 +7279,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 
 	for (i = 0; i < 6; i++)
 		enter_smm_save_seg_64(vcpu, buf, i);
-#else
-	WARN_ON_ONCE(1);
-#endif
 }
+#endif
 
 static void enter_smm(struct kvm_vcpu *vcpu)
 {
@@ -7293,9 +7291,11 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 
 	trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
 	memset(buf, 0, 512);
+#ifdef CONFIG_X86_64
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
 		enter_smm_save_state_64(vcpu, buf);
 	else
+#endif
 		enter_smm_save_state_32(vcpu, buf);
 
 	/*
@@ -7353,8 +7353,10 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
 	kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
 
+#ifdef CONFIG_X86_64
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
 		kvm_x86_ops->set_efer(vcpu, 0);
+#endif
 
 	kvm_update_cpuid(vcpu);
 	kvm_mmu_reset_context(vcpu);