Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 48005f64 authored by Jan Kiszka, committed by Avi Kivity
Browse files

KVM: x86: Save&restore interrupt shadow mask



The interrupt shadow created by STI or MOV-SS-like operations is part of
the VCPU state and must be preserved across migration. Transfer it in
the spare padding field of kvm_vcpu_events.interrupt.

As a side effect we now have to make vmx_set_interrupt_shadow robust
against both shadow types being set. Give MOV SS a higher priority and
skip STI in that case to avoid that VMX throws a fault on next entry.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 03b82a30
Loading
Loading
Loading
Loading
+10 −1
Original line number Diff line number Diff line
@@ -656,6 +656,7 @@ struct kvm_clock_data {
4.29 KVM_GET_VCPU_EVENTS

Capability: KVM_CAP_VCPU_EVENTS
Extended by: KVM_CAP_INTR_SHADOW
Architectures: x86
Type: vm ioctl
Parameters: struct kvm_vcpu_event (out)
@@ -676,7 +677,7 @@ struct kvm_vcpu_events {
		__u8 injected;
		__u8 nr;
		__u8 soft;
		__u8 pad;
		__u8 shadow;
	} interrupt;
	struct {
		__u8 injected;
@@ -688,9 +689,13 @@ struct kvm_vcpu_events {
	__u32 flags;
};

KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that
interrupt.shadow contains a valid state. Otherwise, this field is undefined.

4.30 KVM_SET_VCPU_EVENTS

Capability: KVM_CAP_VCPU_EVENTS
Extended by: KVM_CAP_INTR_SHADOW
Architectures: x86
Type: vm ioctl
Parameters: struct kvm_vcpu_event (in)
@@ -709,6 +714,10 @@ current in-kernel state. The bits are:
KVM_VCPUEVENT_VALID_NMI_PENDING - transfer nmi.pending to the kernel
KVM_VCPUEVENT_VALID_SIPI_VECTOR - transfer sipi_vector

If KVM_CAP_INTR_SHADOW is available, KVM_VCPUEVENT_VALID_SHADOW can be set in
the flags field to signal that interrupt.shadow contains a valid state and
shall be written into the VCPU.


5. The kvm_run structure

+6 −1
Original line number Diff line number Diff line
@@ -257,6 +257,11 @@ struct kvm_reinject_control {
/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
#define KVM_VCPUEVENT_VALID_NMI_PENDING	0x00000001
#define KVM_VCPUEVENT_VALID_SIPI_VECTOR	0x00000002
#define KVM_VCPUEVENT_VALID_SHADOW	0x00000004

/* Interrupt shadow states */
#define KVM_X86_SHADOW_INT_MOV_SS	0x01
#define KVM_X86_SHADOW_INT_STI		0x02

/* for KVM_GET/SET_VCPU_EVENTS */
struct kvm_vcpu_events {
@@ -271,7 +276,7 @@ struct kvm_vcpu_events {
		__u8 injected;
		__u8 nr;
		__u8 soft;
		__u8 pad;
		__u8 shadow;
	} interrupt;
	struct {
		__u8 injected;
+0 −3
Original line number Diff line number Diff line
@@ -153,9 +153,6 @@ struct decode_cache {
	struct fetch_cache fetch;
};

#define X86_SHADOW_INT_MOV_SS  1
#define X86_SHADOW_INT_STI     2

struct x86_emulate_ctxt {
	/* Register state before/after emulation. */
	struct kvm_vcpu *vcpu;
+2 −2
Original line number Diff line number Diff line
@@ -2128,7 +2128,7 @@ special_insn:
		}

		if (c->modrm_reg == VCPU_SREG_SS)
			toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS);
			toggle_interruptibility(ctxt, KVM_X86_SHADOW_INT_MOV_SS);

		rc = kvm_load_segment_descriptor(ctxt->vcpu, sel, c->modrm_reg);

@@ -2366,7 +2366,7 @@ special_insn:
		if (emulator_bad_iopl(ctxt))
			kvm_inject_gp(ctxt->vcpu, 0);
		else {
			toggle_interruptibility(ctxt, X86_SHADOW_INT_STI);
			toggle_interruptibility(ctxt, KVM_X86_SHADOW_INT_STI);
			ctxt->eflags |= X86_EFLAGS_IF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
+1 −1
Original line number Diff line number Diff line
@@ -265,7 +265,7 @@ static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret |= X86_SHADOW_INT_STI | X86_SHADOW_INT_MOV_SS;
		ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret & mask;
}

Loading