Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dab4b911 authored by Jan Kiszka, committed by Marcelo Tosatti
Browse files

KVM: x86: Extend KVM_SET_VCPU_EVENTS with selective updates



User space may not want to overwrite asynchronously changing VCPU event
states on write-back. So allow it to skip nmi.pending and sipi_vector by
clearing the corresponding bits in the flags field of kvm_vcpu_events.

[avi: advertise the bits in KVM_GET_VCPU_EVENTS]

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 6e24a6ef
Loading
Loading
Loading
Loading
+9 −1
Original line number Original line Diff line number Diff line
@@ -685,7 +685,7 @@ struct kvm_vcpu_events {
		__u8 pad;
		__u8 pad;
	} nmi;
	} nmi;
	__u32 sipi_vector;
	__u32 sipi_vector;
	__u32 flags;   /* must be zero */
	__u32 flags;
};
};


4.30 KVM_SET_VCPU_EVENTS
4.30 KVM_SET_VCPU_EVENTS
@@ -701,6 +701,14 @@ vcpu.


See KVM_GET_VCPU_EVENTS for the data structure.
See KVM_GET_VCPU_EVENTS for the data structure.


Fields that may be modified asynchronously by running VCPUs can be excluded
from the update. These fields are nmi.pending and sipi_vector. Keep the
corresponding bits in the flags field cleared to suppress overwriting the
current in-kernel state. The bits are:

KVM_VCPUEVENT_VALID_NMI_PENDING - transfer nmi.pending to the kernel
KVM_VCPUEVENT_VALID_SIPI_VECTOR - transfer sipi_vector



5. The kvm_run structure
5. The kvm_run structure


+4 −0
Original line number Original line Diff line number Diff line
@@ -254,6 +254,10 @@ struct kvm_reinject_control {
	__u8 reserved[31];
	__u8 reserved[31];
};
};


/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
#define KVM_VCPUEVENT_VALID_NMI_PENDING	0x00000001
#define KVM_VCPUEVENT_VALID_SIPI_VECTOR	0x00000002

/* for KVM_GET/SET_VCPU_EVENTS */
/* for KVM_GET/SET_VCPU_EVENTS */
struct kvm_vcpu_events {
struct kvm_vcpu_events {
	struct {
	struct {
+8 −4
Original line number Original line Diff line number Diff line
@@ -1913,7 +1913,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,


	events->sipi_vector = vcpu->arch.sipi_vector;
	events->sipi_vector = vcpu->arch.sipi_vector;


	events->flags = 0;
	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR);


	vcpu_put(vcpu);
	vcpu_put(vcpu);
}
}
@@ -1921,7 +1922,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
					      struct kvm_vcpu_events *events)
					      struct kvm_vcpu_events *events)
{
{
	if (events->flags)
	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
		return -EINVAL;
		return -EINVAL;


	vcpu_load(vcpu);
	vcpu_load(vcpu);
@@ -1938,9 +1940,11 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
		kvm_pic_clear_isr_ack(vcpu->kvm);
		kvm_pic_clear_isr_ack(vcpu->kvm);


	vcpu->arch.nmi_injected = events->nmi.injected;
	vcpu->arch.nmi_injected = events->nmi.injected;
	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
		vcpu->arch.nmi_pending = events->nmi.pending;
		vcpu->arch.nmi_pending = events->nmi.pending;
	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);


	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
		vcpu->arch.sipi_vector = events->sipi_vector;
		vcpu->arch.sipi_vector = events->sipi_vector;


	vcpu_put(vcpu);
	vcpu_put(vcpu);