arch/x86/include/asm/kvm_host.h  +7 −1

@@ -512,10 +512,15 @@ struct kvm_x86_ops {
 			       unsigned char *hypercall_addr);
 	int (*get_irq)(struct kvm_vcpu *vcpu);
 	void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
+	void (*set_nmi)(struct kvm_vcpu *vcpu);
 	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
 				bool has_error_code, u32 error_code);
-	void (*inject_pending_irq)(struct kvm_vcpu *vcpu, struct kvm_run *run);
 	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
+	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
+	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
+	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
+	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
+	void (*drop_interrupt_shadow)(struct kvm_vcpu *vcpu);
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*get_tdp_level)(void);
 	int (*get_mt_mask_shift)(void);

@@ -763,6 +768,7 @@ enum {
 #define HF_GIF_MASK		(1 << 0)
 #define HF_HIF_MASK		(1 << 1)
 #define HF_VINTR_MASK		(1 << 2)
+#define HF_NMI_MASK		(1 << 3)
 
 /*
  * Hardware virtualization extension instructions may fault if a
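The header hunk above replaces the single inject_pending_irq() callback with finer-grained hooks: the vendor modules now only answer "may I inject?" (interrupt_allowed, nmi_allowed), perform a single injection (set_irq, set_nmi), or arm a "window" exit when injection is not currently possible, while the decision of what to inject moves to generic x86 code. A minimal standalone model of that split, illustrative only (the struct, field and function names below are stand-ins, not kernel code):

/* Illustrative model of the allowed/window split -- not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct vcpu { bool nmi_pending; bool nmi_blocked; bool window_armed; };

static int  nmi_allowed(struct vcpu *v)       { return !v->nmi_blocked; }
static void set_nmi(struct vcpu *v)           { puts("inject NMI"); v->nmi_pending = false; }
static void enable_nmi_window(struct vcpu *v) { puts("arm NMI window exit"); v->window_armed = true; }

/* Generic-side step: inject now if the backend allows it, otherwise ask
 * for an exit once the guest becomes able to take the event. */
static void inject_step(struct vcpu *v)
{
	if (!v->nmi_pending)
		return;
	if (nmi_allowed(v))
		set_nmi(v);
	else
		enable_nmi_window(v);	/* retry after the next VM exit */
}

int main(void)
{
	struct vcpu v = { .nmi_pending = true, .nmi_blocked = true };
	inject_step(&v);	/* blocked: only arms the window      */
	v.nmi_blocked = false;
	inject_step(&v);	/* unblocked: the NMI is injected now */
	return 0;
}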
arch/x86/kvm/svm.c  +50 −46

@@ -1843,6 +1843,14 @@ static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
+static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	++svm->vcpu.stat.nmi_window_exits;
+	svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
+	svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
+	return 1;
+}
+
 static int invlpg_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0) != EMULATE_DONE)

@@ -1863,8 +1871,10 @@ static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
 	/* instruction emulation calls kvm_set_cr8() */
 	emulate_instruction(&svm->vcpu, NULL, 0, 0, 0);
-	if (irqchip_in_kernel(svm->vcpu.kvm))
+	if (irqchip_in_kernel(svm->vcpu.kvm)) {
+		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
 		return 1;
+	}
 	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
 		return 1;
 	kvm_run->exit_reason = KVM_EXIT_SET_TPR;

@@ -2120,6 +2130,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 	[SVM_EXIT_VINTR]			= interrupt_window_interception,
 	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
 	[SVM_EXIT_CPUID]			= cpuid_interception,
+	[SVM_EXIT_IRET]				= iret_interception,
 	[SVM_EXIT_INVD]				= emulate_on_interception,
 	[SVM_EXIT_HLT]				= halt_interception,
 	[SVM_EXIT_INVLPG]			= invlpg_interception,

@@ -2227,6 +2238,21 @@ static void pre_svm_run(struct vcpu_svm *svm)
 		new_asid(svm, svm_data);
 }
 
+static void svm_drop_interrupt_shadow(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
+}
+
+static void svm_inject_nmi(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
+	vcpu->arch.hflags |= HF_NMI_MASK;
+	svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
+	++vcpu->stat.nmi_injections;
+}
+
 static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
 {

@@ -2242,8 +2268,10 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
 		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
 }
 
-static void svm_queue_irq(struct vcpu_svm *svm, unsigned nr)
+static void svm_queue_irq(struct kvm_vcpu *vcpu, unsigned nr)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
+
 	svm->vmcb->control.event_inj = nr |
 		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
 }

@@ -2254,28 +2282,26 @@ static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
 
 	nested_svm_intr(svm);
 
-	svm_queue_irq(svm, irq);
+	svm_queue_irq(vcpu, irq);
 }
 
-static void update_cr8_intercept(struct kvm_vcpu *vcpu)
+static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	struct vmcb *vmcb = svm->vmcb;
-	int max_irr, tpr;
 
-	if (!irqchip_in_kernel(vcpu->kvm) || vcpu->arch.apic->vapic_addr)
+	if (irr == -1)
 		return;
 
-	vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
-
-	max_irr = kvm_lapic_find_highest_irr(vcpu);
-	if (max_irr == -1)
-		return;
-
-	tpr = kvm_lapic_get_cr8(vcpu) << 4;
-
-	if (tpr >= (max_irr & 0xf0))
-		vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
+	if (tpr >= irr)
+		svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
+}
+
+static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb *vmcb = svm->vmcb;
+	return !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
+		!(svm->vcpu.arch.hflags & HF_NMI_MASK);
 }
 
 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)

@@ -2293,39 +2319,12 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
 	svm_inject_irq(to_svm(vcpu), 0x0);
 }
 
-static void svm_intr_inject(struct kvm_vcpu *vcpu)
-{
-	/* try to reinject previous events if any */
-	if (vcpu->arch.interrupt.pending) {
-		svm_queue_irq(to_svm(vcpu), vcpu->arch.interrupt.nr);
-		return;
-	}
-
-	/* try to inject new event if pending */
-	if (kvm_cpu_has_interrupt(vcpu)) {
-		if (svm_interrupt_allowed(vcpu)) {
-			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
-			svm_queue_irq(to_svm(vcpu), vcpu->arch.interrupt.nr);
-		}
-	}
-}
-
-static void svm_intr_assist(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void enable_nmi_window(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
-		kvm_run->request_interrupt_window;
 
-	if (nested_svm_intr(svm))
-		goto out;
-
-	svm_intr_inject(vcpu);
-
-	if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
 		enable_irq_window(vcpu);
-
-out:
-	update_cr8_intercept(vcpu);
 }
 
 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)

@@ -2650,9 +2649,14 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.patch_hypercall = svm_patch_hypercall,
 	.get_irq = svm_get_irq,
 	.set_irq = svm_set_irq,
+	.set_nmi = svm_inject_nmi,
 	.queue_exception = svm_queue_exception,
-	.inject_pending_irq = svm_intr_assist,
 	.interrupt_allowed = svm_interrupt_allowed,
+	.nmi_allowed = svm_nmi_allowed,
+	.enable_nmi_window = enable_nmi_window,
+	.enable_irq_window = enable_irq_window,
+	.update_cr8_intercept = update_cr8_intercept,
+	.drop_interrupt_shadow = svm_drop_interrupt_shadow,
 	.set_tss_addr = svm_set_tss_addr,
 	.get_tdp_level = get_npt_level,
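On SVM an injected NMI blocks further NMIs until the guest executes IRET, and the patch tracks that window in software: svm_inject_nmi() sets HF_NMI_MASK and turns on the IRET intercept, iret_interception() clears both, and svm_nmi_allowed() refuses injection while the flag is set. A small standalone model of that lifecycle, illustrative only (the names merely echo the diff):

/* Illustrative model of the SVM NMI-window bookkeeping -- not kernel code. */
#include <assert.h>
#include <stdbool.h>

struct vcpu { bool nmi_masked; bool iret_intercepted; };

static bool nmi_allowed(const struct vcpu *v)   { return !v->nmi_masked; }

static void inject_nmi(struct vcpu *v)          /* mirrors svm_inject_nmi()    */
{
	v->nmi_masked = true;                   /* HF_NMI_MASK                 */
	v->iret_intercepted = true;             /* INTERCEPT_IRET              */
}

static void guest_executes_iret(struct vcpu *v) /* mirrors iret_interception() */
{
	v->iret_intercepted = false;
	v->nmi_masked = false;
}

int main(void)
{
	struct vcpu v = { 0 };
	assert(nmi_allowed(&v));
	inject_nmi(&v);
	assert(!nmi_allowed(&v));	/* a second NMI must wait for IRET */
	guest_executes_iret(&v);
	assert(nmi_allowed(&v));	/* window reopened                 */
	return 0;
}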
arch/x86/kvm/vmx.c  +19 −60

@@ -1314,6 +1314,9 @@ static __init int hardware_setup(void)
 	if (!cpu_has_vmx_flexpriority())
 		flexpriority_enabled = 0;
 
+	if (!cpu_has_vmx_tpr_shadow())
+		kvm_x86_ops->update_cr8_intercept = NULL;
+
 	return alloc_kvm_area();
 }

@@ -2404,6 +2407,12 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
+void vmx_drop_interrupt_shadow(struct kvm_vcpu *vcpu)
+{
+	vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+			GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
+}
+
 static void enable_irq_window(struct kvm_vcpu *vcpu)
 {
 	u32 cpu_based_vm_exec_control;

@@ -3214,21 +3223,14 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static void update_tpr_threshold(struct kvm_vcpu *vcpu)
+static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 {
-	int max_irr, tpr;
-
-	if (!vm_need_tpr_shadow(vcpu->kvm))
-		return;
-
-	if (!kvm_lapic_enabled(vcpu) ||
-	    ((max_irr = kvm_lapic_find_highest_irr(vcpu)) == -1)) {
+	if (irr == -1 || tpr < irr) {
 		vmcs_write32(TPR_THRESHOLD, 0);
 		return;
 	}
 
-	tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4;
-	vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4);
+	vmcs_write32(TPR_THRESHOLD, irr);
 }

@@ -3300,55 +3302,6 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 	}
 }
 
-static void vmx_intr_inject(struct kvm_vcpu *vcpu)
-{
-	/* try to reinject previous events if any */
-	if (vcpu->arch.nmi_injected) {
-		vmx_inject_nmi(vcpu);
-		return;
-	}
-
-	if (vcpu->arch.interrupt.pending) {
-		vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
-		return;
-	}
-
-	/* try to inject new event if pending */
-	if (vcpu->arch.nmi_pending) {
-		if (vmx_nmi_allowed(vcpu)) {
-			vcpu->arch.nmi_pending = false;
-			vcpu->arch.nmi_injected = true;
-			vmx_inject_nmi(vcpu);
-		}
-	} else if (kvm_cpu_has_interrupt(vcpu)) {
-		if (vmx_interrupt_allowed(vcpu)) {
-			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
-			vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
-		}
-	}
-}
-
-static void vmx_intr_assist(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
-		kvm_run->request_interrupt_window;
-
-	update_tpr_threshold(vcpu);
-
-	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
-		vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
-				GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
-
-	vmx_intr_inject(vcpu);
-
-	/* enable NMI/IRQ window open exits if needed */
-	if (vcpu->arch.nmi_pending)
-		enable_nmi_window(vcpu);
-	else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
-		enable_irq_window(vcpu);
-}
-
 /*
  * Failure to inject an interrupt should give us the information
  * in IDT_VECTORING_INFO_FIELD.  However, if the failure occurs

@@ -3683,9 +3636,15 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.patch_hypercall = vmx_patch_hypercall,
 	.get_irq = vmx_get_irq,
 	.set_irq = vmx_inject_irq,
+	.set_nmi = vmx_inject_nmi,
 	.queue_exception = vmx_queue_exception,
-	.inject_pending_irq = vmx_intr_assist,
 	.interrupt_allowed = vmx_interrupt_allowed,
+	.nmi_allowed = vmx_nmi_allowed,
+	.enable_nmi_window = enable_nmi_window,
+	.enable_irq_window = enable_irq_window,
+	.update_cr8_intercept = update_cr8_intercept,
+	.drop_interrupt_shadow = vmx_drop_interrupt_shadow,
 	.set_tss_addr = vmx_set_tss_addr,
 	.get_tdp_level = get_ept_level,
 	.get_mt_mask_shift = vmx_get_mt_mask_shift,
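On the VMX side the CR8/TPR logic now receives pre-digested values: generic code passes the current task priority and the highest pending IRR, both already reduced to a 4-bit priority class, and update_cr8_intercept() writes the IRR priority into TPR_THRESHOLD only when the guest's TPR is at or above it; otherwise the threshold is cleared because the interrupt is deliverable as-is. A tiny worked example of that decision, illustrative only (the helper and the values are made up, not kernel code):

/* Illustrative model of the TPR_THRESHOLD decision -- not kernel code. */
#include <stdio.h>

/* Mirrors what x86.c now hands to the backend: tpr = CR8 (0..15),
 * irr = highest pending vector >> 4, or -1 if nothing is pending.   */
static int tpr_threshold(int tpr, int irr)
{
	if (irr == -1 || tpr < irr)
		return 0;	/* interrupt deliverable now: no threshold exit needed  */
	return irr;		/* exit once the guest lowers TPR below this priority   */
}

int main(void)
{
	/* highest pending vector 0x51 -> priority class 5 */
	printf("tpr=3, irr=5  -> threshold %d\n", tpr_threshold(3, 5));  /* 0 */
	printf("tpr=7, irr=5  -> threshold %d\n", tpr_threshold(7, 5));  /* 5 */
	printf("tpr=7, no irr -> threshold %d\n", tpr_threshold(7, -1)); /* 0 */
	return 0;
}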
arch/x86/kvm/x86.c  +69 −2

@@ -3114,6 +3114,68 @@ static void vapic_exit(struct kvm_vcpu *vcpu)
 	up_read(&vcpu->kvm->slots_lock);
 }
 
+static void update_cr8_intercept(struct kvm_vcpu *vcpu)
+{
+	int max_irr, tpr;
+
+	if (!kvm_x86_ops->update_cr8_intercept)
+		return;
+
+	max_irr = kvm_lapic_find_highest_irr(vcpu);
+
+	if (max_irr != -1)
+		max_irr >>= 4;
+
+	tpr = kvm_lapic_get_cr8(vcpu);
+
+	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
+}
+
+static void inject_irq(struct kvm_vcpu *vcpu)
+{
+	/* try to reinject previous events if any */
+	if (vcpu->arch.nmi_injected) {
+		kvm_x86_ops->set_nmi(vcpu);
+		return;
+	}
+
+	if (vcpu->arch.interrupt.pending) {
+		kvm_x86_ops->set_irq(vcpu, vcpu->arch.interrupt.nr);
+		return;
+	}
+
+	/* try to inject new event if pending */
+	if (vcpu->arch.nmi_pending) {
+		if (kvm_x86_ops->nmi_allowed(vcpu)) {
+			vcpu->arch.nmi_pending = false;
+			vcpu->arch.nmi_injected = true;
+			kvm_x86_ops->set_nmi(vcpu);
+		}
+	} else if (kvm_cpu_has_interrupt(vcpu)) {
+		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
+			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
+			kvm_x86_ops->set_irq(vcpu, vcpu->arch.interrupt.nr);
+		}
+	}
+}
+
+static void inject_pending_irq(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
+		kvm_run->request_interrupt_window;
+
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+		kvm_x86_ops->drop_interrupt_shadow(vcpu);
+
+	inject_irq(vcpu);
+
+	/* enable NMI/IRQ window open exits if needed */
+	if (vcpu->arch.nmi_pending)
+		kvm_x86_ops->enable_nmi_window(vcpu);
+	else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+		kvm_x86_ops->enable_irq_window(vcpu);
+}
+
 static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	int r;

@@ -3172,9 +3234,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (vcpu->arch.exception.pending)
 		__queue_exception(vcpu);
 	else
-		kvm_x86_ops->inject_pending_irq(vcpu, kvm_run);
+		inject_pending_irq(vcpu, kvm_run);
 
-	kvm_lapic_sync_to_vapic(vcpu);
+	if (kvm_lapic_enabled(vcpu)) {
+		if (!vcpu->arch.apic->vapic_addr)
+			update_cr8_intercept(vcpu);
+		else
+			kvm_lapic_sync_to_vapic(vcpu);
+	}
 
 	up_read(&vcpu->kvm->slots_lock);
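The generic inject_pending_irq() now fixes the event priority for both backends: an event that was already injected but not yet delivered is re-queued first, then a new NMI is preferred over a new external interrupt, and anything that cannot be injected right away is deferred by arming the matching window exit. A compact standalone model of that ordering, illustrative only (the flag names loosely follow vcpu->arch in the diff, the rest is hypothetical):

/* Illustrative model of the injection priority in inject_irq() -- not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct arch_state {
	bool nmi_injected;	/* an NMI was injected but the VM entry failed          */
	bool interrupt_queued;	/* an interrupt is already queued for (re)injection     */
	bool nmi_pending;	/* a new NMI is waiting to be injected                   */
	bool irq_deliverable;	/* the APIC/PIC has a deliverable vector                 */
};

static const char *next_event(const struct arch_state *s)
{
	if (s->nmi_injected)
		return "reinject NMI";
	if (s->interrupt_queued)
		return "reinject queued IRQ";
	if (s->nmi_pending)
		return "inject new NMI (or arm NMI window)";
	if (s->irq_deliverable)
		return "inject new IRQ (or arm IRQ window)";
	return "nothing to do";
}

int main(void)
{
	struct arch_state a = { .nmi_pending = true, .irq_deliverable = true };
	puts(next_event(&a));	/* an NMI wins over a pending external interrupt */

	struct arch_state b = { .interrupt_queued = true, .nmi_pending = true };
	puts(next_event(&b));	/* reinjection always comes first                */
	return 0;
}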