
Commit cc3d967f authored by Ladi Prosek, committed by Radim Krčmář

KVM: SVM: detect opening of SMI window using STGI intercept



Commit 05cade71 ("KVM: nSVM: fix SMI injection in guest mode") made
KVM mask SMI if GIF=0 but it didn't do anything to unmask it when GIF is
enabled.
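For reference, the masking side lives in the vendor ->smi_allowed() callback
that the svm.c hunk below wires up; the following is only a minimal sketch of
the SVM variant, assuming the upstream shape of svm_smi_allowed and omitting
the nested-guest handling:

/*
 * Sketch only: svm_smi_allowed() refuses to inject an SMI while GIF
 * is clear, so the SMI stays pending until GIF is set again.
 */
static int svm_smi_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* SMIs are held pending while GIF=0 */
	if (!gif_set(svm))
		return 0;

	return 1;
}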

The issue manifests for me as a significantly longer boot time of Windows
guests when running with SMM-enabled OVMF.

This commit fixes it by intercepting STGI instead of requesting an immediate
exit when the reason the SMI was masked is that GIF is clear.
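Read together with the hunks below, the intended flow on SVM is roughly the
following (a condensed sketch of how the pieces interact, not literal kernel
code):

/*
 * Condensed flow sketch (see the svm.c and x86.c hunks below):
 *
 * vcpu_enter_guest():
 *   smi_pending && !is_smm(vcpu)
 *     -> enable_smi_window(vcpu)
 *          GIF set:   return 0, request an immediate exit so the SMI
 *                     is injected on the next iteration
 *          GIF clear: arm INTERCEPT_STGI (only needed with VGIF;
 *                     without VGIF, STGI is intercepted anyway),
 *                     return 1 and keep running the guest
 *
 * guest executes STGI
 *   -> stgi_interception(): GIF becomes set, the temporary STGI
 *      intercept is removed, and on the next entry the still-pending
 *      SMI passes smi_allowed() and is injected.
 */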

Fixes: 05cade71 ("KVM: nSVM: fix SMI injection in guest mode")
Signed-off-by: Ladi Prosek <lprosek@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
parent 9b8ebbdb
arch/x86/include/asm/kvm_host.h +1 −0
@@ -1065,6 +1065,7 @@ struct kvm_x86_ops {
 	int (*smi_allowed)(struct kvm_vcpu *vcpu);
 	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
 	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
+	int (*enable_smi_window)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_arch_async_pf {
arch/x86/kvm/svm.c +15 −1
@@ -3187,7 +3187,7 @@ static int stgi_interception(struct vcpu_svm *svm)
 
 	/*
 	 * If VGIF is enabled, the STGI intercept is only added to
-	 * detect the opening of the NMI window; remove it now.
+	 * detect the opening of the SMI/NMI window; remove it now.
 	 */
 	if (vgif_enabled(svm))
 		clr_intercept(svm, INTERCEPT_STGI);
@@ -5476,6 +5476,19 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
 	return ret;
 }
 
+static int enable_smi_window(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (!gif_set(svm)) {
+		if (vgif_enabled(svm))
+			set_intercept(svm, INTERCEPT_STGI);
+		/* STGI will cause a vm exit */
+		return 1;
+	}
+	return 0;
+}
+
 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -5590,6 +5603,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.smi_allowed = svm_smi_allowed,
 	.pre_enter_smm = svm_pre_enter_smm,
 	.pre_leave_smm = svm_pre_leave_smm,
+	.enable_smi_window = enable_smi_window,
 };
 
 static int __init svm_init(void)
arch/x86/kvm/vmx.c +6 −0
@@ -11973,6 +11973,11 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
 	return 0;
 }
 
+static int enable_smi_window(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
 static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = cpu_has_kvm_support,
 	.disabled_by_bios = vmx_disabled_by_bios,
@@ -12102,6 +12107,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.smi_allowed = vmx_smi_allowed,
 	.pre_enter_smm = vmx_pre_enter_smm,
 	.pre_leave_smm = vmx_pre_leave_smm,
+	.enable_smi_window = enable_smi_window,
 };
 
 static int __init vmx_init(void)
arch/x86/kvm/x86.c +14 −8
@@ -6892,16 +6892,22 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		if (inject_pending_event(vcpu, req_int_win) != 0)
 			req_immediate_exit = true;
 		else {
-			/* Enable NMI/IRQ window open exits if needed.
+			/* Enable SMI/NMI/IRQ window open exits if needed.
 			 *
-			 * SMIs have two cases: 1) they can be nested, and
-			 * then there is nothing to do here because RSM will
-			 * cause a vmexit anyway; 2) or the SMI can be pending
-			 * because inject_pending_event has completed the
-			 * injection of an IRQ or NMI from the previous vmexit,
-			 * and then we request an immediate exit to inject the SMI.
+			 * SMIs have three cases:
+			 * 1) They can be nested, and then there is nothing to
+			 *    do here because RSM will cause a vmexit anyway.
+			 * 2) There is an ISA-specific reason why SMI cannot be
+			 *    injected, and the moment when this changes can be
+			 *    intercepted.
+			 * 3) Or the SMI can be pending because
+			 *    inject_pending_event has completed the injection
+			 *    of an IRQ or NMI from the previous vmexit, and
+			 *    then we request an immediate exit to inject the
+			 *    SMI.
 			 */
 			if (vcpu->arch.smi_pending && !is_smm(vcpu))
-				req_immediate_exit = true;
+				if (!kvm_x86_ops->enable_smi_window(vcpu))
+					req_immediate_exit = true;
 			if (vcpu->arch.nmi_pending)
 				kvm_x86_ops->enable_nmi_window(vcpu);