
Commit b463a6f7 authored by Avi Kivity

KVM: Non-atomic interrupt injection



Change the interrupt injection code to work from preemptible, interrupts-enabled
context.  This works by adding a ->cancel_injection() operation
that undoes an injection in case we were not able to actually enter the guest
(this condition could never happen with atomic injection).

Signed-off-by: Avi Kivity <avi@redhat.com>
parent 83422e17
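
For orientation, here is a minimal userspace model of the pattern this commit introduces; all names in it are illustrative, not the kernel's. An event is injected speculatively while preemption is still enabled, and if the final entry check fails, the injection is rolled back into the pending queue so a later loop iteration can retry:

#include <stdbool.h>
#include <stdio.h>

/* Toy vcpu: one pending-event slot and one "programmed for injection" slot,
 * standing in for KVM's event queues and the VMCS/VMCB injection fields. */
struct toy_vcpu {
	int pending_event;	/* event KVM still owes the guest (-1: none) */
	int injected_event;	/* event programmed into virtual hw (-1: none) */
	bool abort_entry;	/* models signal_pending()/vcpu->requests */
};

/* Inject while preemptible: move the event from the queue into the
 * (virtual) hardware injection field. */
static void inject_pending_event(struct toy_vcpu *v)
{
	if (v->pending_event != -1) {
		v->injected_event = v->pending_event;
		v->pending_event = -1;
	}
}

/* Models ->cancel_injection(): undo the injection, putting the event back
 * on the queue, as svm_cancel_injection()/vmx_cancel_injection() do via
 * the complete_interrupts() helpers. */
static void cancel_injection(struct toy_vcpu *v)
{
	if (v->injected_event != -1) {
		v->pending_event = v->injected_event;
		v->injected_event = -1;
	}
}

static int enter_guest(struct toy_vcpu *v)
{
	inject_pending_event(v);	/* speculative, preemption enabled */

	/* ...preempt_disable() and local_irq_disable() would happen here... */

	if (v->abort_entry) {		/* cannot enter the guest after all */
		cancel_injection(v);	/* roll back, retry next iteration */
		return 1;
	}

	printf("entered guest, delivering event %d\n", v->injected_event);
	v->injected_event = -1;
	return 0;
}

int main(void)
{
	struct toy_vcpu v = { .pending_event = 32, .injected_event = -1,
			      .abort_entry = true };

	enter_guest(&v);		/* aborted: event goes back to queue */
	v.abort_entry = false;
	enter_guest(&v);		/* retried: event is delivered */
	return 0;
}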
arch/x86/include/asm/kvm_host.h +1 −0
@@ -552,6 +552,7 @@ struct kvm_x86_ops {
 	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
 				bool has_error_code, u32 error_code,
 				bool reinject);
+	void (*cancel_injection)(struct kvm_vcpu *vcpu);
 	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
 	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
arch/x86/kvm/svm.c +12 −0
@@ -3261,6 +3261,17 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
 	}
 }
 
+static void svm_cancel_injection(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb_control_area *control = &svm->vmcb->control;
+
+	control->exit_int_info = control->event_inj;
+	control->exit_int_info_err = control->event_inj_err;
+	control->event_inj = 0;
+	svm_complete_interrupts(svm);
+}
+
 #ifdef CONFIG_X86_64
 #define R "r"
 #else
@@ -3631,6 +3642,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_irq = svm_set_irq,
 	.set_nmi = svm_inject_nmi,
 	.queue_exception = svm_queue_exception,
+	.cancel_injection = svm_cancel_injection,
 	.interrupt_allowed = svm_interrupt_allowed,
 	.nmi_allowed = svm_nmi_allowed,
 	.get_nmi_mask = svm_get_nmi_mask,
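
The SVM rollback works by forging the state a real failed VM entry would leave behind: on #VMEXIT the CPU copies an event that was pending delivery from the EVENTINJ field into EXITINTINFO, and svm_complete_interrupts() already knows how to re-queue whatever EXITINTINFO reports. Annotated, the new helper above does (kernel context, not standalone code):

	control->exit_int_info     = control->event_inj;     /* pretend hw reported a failed delivery */
	control->exit_int_info_err = control->event_inj_err; /* carry the error code along */
	control->event_inj         = 0;                      /* nothing is programmed for entry now */
	svm_complete_interrupts(svm);                        /* existing exit path re-queues the event */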
arch/x86/kvm/vmx.c +11 −0
@@ -3895,6 +3895,16 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 				  IDT_VECTORING_ERROR_CODE);
 }
 
+static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
+{
+	__vmx_complete_interrupts(to_vmx(vcpu),
+				  vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
+				  VM_ENTRY_INSTRUCTION_LEN,
+				  VM_ENTRY_EXCEPTION_ERROR_CODE);
+
+	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
+}
+
 /*
  * Failure to inject an interrupt should give us the information
  * in IDT_VECTORING_INFO_FIELD.  However, if the failure occurs
@@ -4348,6 +4358,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.set_irq = vmx_inject_irq,
 	.set_nmi = vmx_inject_nmi,
 	.queue_exception = vmx_queue_exception,
+	.cancel_injection = vmx_cancel_injection,
 	.interrupt_allowed = vmx_interrupt_allowed,
 	.nmi_allowed = vmx_nmi_allowed,
 	.get_nmi_mask = vmx_get_nmi_mask,
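
The VMX side is symmetric. __vmx_complete_interrupts() normally decodes IDT_VECTORING_INFO_FIELD after a VM exit to recover an event whose delivery was interrupted; vmx_cancel_injection() instead feeds it the VM-entry interruption-information field that the injection path had programmed, then disarms the injection (kernel context, not standalone code):

	/* decode what we had programmed for entry as if it were the
	 * vectoring info of a failed delivery, re-queueing the event... */
	__vmx_complete_interrupts(to_vmx(vcpu),
				  vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
				  VM_ENTRY_INSTRUCTION_LEN,
				  VM_ENTRY_EXCEPTION_ERROR_CODE);
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);	/* ...and inject nothing on the next entry */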
arch/x86/kvm/x86.c +16 −20
@@ -5005,7 +5005,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	int r;
 	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
 		vcpu->run->request_interrupt_window;
-	bool req_event;
 
 	if (vcpu->requests) {
 		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
@@ -5041,6 +5040,21 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (unlikely(r))
 		goto out;
 
+	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
+		inject_pending_event(vcpu);
+
+		/* enable NMI/IRQ window open exits if needed */
+		if (vcpu->arch.nmi_pending)
+			kvm_x86_ops->enable_nmi_window(vcpu);
+		else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+			kvm_x86_ops->enable_irq_window(vcpu);
+
+		if (kvm_lapic_enabled(vcpu)) {
+			update_cr8_intercept(vcpu);
+			kvm_lapic_sync_to_vapic(vcpu);
+		}
+	}
+
 	preempt_disable();
 
 	kvm_x86_ops->prepare_guest_switch(vcpu);
@@ -5053,35 +5067,17 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	local_irq_disable();
 
-	req_event = kvm_check_request(KVM_REQ_EVENT, vcpu);
-
 	if (!atomic_read(&vcpu->guest_mode) || vcpu->requests
 	    || need_resched() || signal_pending(current)) {
-		if (req_event)
-			kvm_make_request(KVM_REQ_EVENT, vcpu);
 		atomic_set(&vcpu->guest_mode, 0);
 		smp_wmb();
 		local_irq_enable();
 		preempt_enable();
+		kvm_x86_ops->cancel_injection(vcpu);
 		r = 1;
 		goto out;
 	}
 
-	if (req_event || req_int_win) {
-		inject_pending_event(vcpu);
-
-		/* enable NMI/IRQ window open exits if needed */
-		if (vcpu->arch.nmi_pending)
-			kvm_x86_ops->enable_nmi_window(vcpu);
-		else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
-			kvm_x86_ops->enable_irq_window(vcpu);
-
-		if (kvm_lapic_enabled(vcpu)) {
-			update_cr8_intercept(vcpu);
-			kvm_lapic_sync_to_vapic(vcpu);
-		}
-	}
-
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
 	kvm_guest_enter();
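
A note on the design choice in this last hunk: with injection moved ahead of preempt_disable(), the stretch between injecting an event and entering the guest is no longer atomic, so the bail-out path (guest_mode cleared, pending requests, need_resched(), or a pending signal) must now call ->cancel_injection() to take the event back. The completion helpers that perform the re-queue also raise KVM_REQ_EVENT again, per the parent commit 83422e17, which is why the old re-arming of KVM_REQ_EVENT in the bail-out path could be dropped: the next loop iteration simply re-runs inject_pending_event() and delivers the event then.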