 drivers/kvm/kvm.h      |  2 ++
 drivers/kvm/kvm_main.c | 36 ++++++++++++++++++++++++++++++++++++
 drivers/kvm/svm.c      | 10 +---------
 drivers/kvm/vmx.c      | 10 +---------
 4 files changed, 40 insertions(+), 18 deletions(-)

diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -482,6 +482,8 @@ void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 
+int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
 static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 				     u32 error_code)
 {
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -1138,6 +1138,42 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
 }
 EXPORT_SYMBOL_GPL(emulate_instruction);
 
+int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	unsigned long nr, a0, a1, a2, a3, a4, a5, ret;
+
+	kvm_arch_ops->cache_regs(vcpu);
+	ret = -KVM_EINVAL;
+#ifdef CONFIG_X86_64
+	if (is_long_mode(vcpu)) {
+		nr = vcpu->regs[VCPU_REGS_RAX];
+		a0 = vcpu->regs[VCPU_REGS_RDI];
+		a1 = vcpu->regs[VCPU_REGS_RSI];
+		a2 = vcpu->regs[VCPU_REGS_RDX];
+		a3 = vcpu->regs[VCPU_REGS_RCX];
+		a4 = vcpu->regs[VCPU_REGS_R8];
+		a5 = vcpu->regs[VCPU_REGS_R9];
+	} else
+#endif
+	{
+		nr = vcpu->regs[VCPU_REGS_RBX] & -1u;
+		a0 = vcpu->regs[VCPU_REGS_RAX] & -1u;
+		a1 = vcpu->regs[VCPU_REGS_RCX] & -1u;
+		a2 = vcpu->regs[VCPU_REGS_RDX] & -1u;
+		a3 = vcpu->regs[VCPU_REGS_RSI] & -1u;
+		a4 = vcpu->regs[VCPU_REGS_RDI] & -1u;
+		a5 = vcpu->regs[VCPU_REGS_RBP] & -1u;
+	}
+	switch (nr) {
+	default:
+		;
+	}
+	vcpu->regs[VCPU_REGS_RAX] = ret;
+	kvm_arch_ops->decache_regs(vcpu);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(kvm_hypercall);
+
 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
 {
 	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -1078,16 +1078,8 @@ static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	printk(KERN_DEBUG "got vmmcall at RIP %08llx\n",
-	       vcpu->svm->vmcb->save.rip);
-	printk(KERN_DEBUG "vmmcall params: %08llx, %08lx, %08lx, %08lx\n",
-	       vcpu->svm->vmcb->save.rax,
-	       vcpu->regs[VCPU_REGS_RCX],
-	       vcpu->regs[VCPU_REGS_RDX],
-	       vcpu->regs[VCPU_REGS_RBP]);
-	vcpu->svm->vmcb->save.rax = 0;
 	vcpu->svm->vmcb->save.rip += 3;
-	return 1;
+	return kvm_hypercall(vcpu, kvm_run);
 }
 
 static int invalid_op_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -1659,16 +1659,8 @@ static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	kvm_run->exit_reason = KVM_EXIT_DEBUG;
-	printk(KERN_DEBUG "got vmcall at RIP %08lx\n", vmcs_readl(GUEST_RIP));
-	printk(KERN_DEBUG "vmcall params: %08lx, %08lx, %08lx, %08lx\n",
-	       vcpu->regs[VCPU_REGS_RAX],
-	       vcpu->regs[VCPU_REGS_RCX],
-	       vcpu->regs[VCPU_REGS_RDX],
-	       vcpu->regs[VCPU_REGS_RBP]);
-	vcpu->regs[VCPU_REGS_RAX] = 0;
 	vmcs_writel(GUEST_RIP, vmcs_readl(GUEST_RIP)+3);
-	return 1;
+	return kvm_hypercall(vcpu, kvm_run);
 }
 
 /*
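
Note on the calling convention: kvm_hypercall() reads the hypercall number and arguments straight out of the guest registers (RAX/RDI/RSI/RDX/RCX/R8/R9 in long mode; EBX/EAX/ECX/EDX/ESI/EDI/EBP otherwise, with the "& -1u" masking truncating to 32 bits) and passes the return value back in RAX. As a rough illustration only, not part of this patch, a 64-bit guest could issue a two-argument hypercall along these lines; "vmcall" is the Intel VMX instruction, an SVM guest would execute "vmmcall" instead:

	/*
	 * Hypothetical guest-side helper (not in the patch), assuming the
	 * 64-bit register layout read by kvm_hypercall(): nr in RAX,
	 * arguments in RDI/RSI/..., result returned in RAX.
	 */
	static inline unsigned long example_hypercall2(unsigned long nr,
						       unsigned long a0,
						       unsigned long a1)
	{
		unsigned long ret;

		/* Intel vmcall; an AMD guest would use vmmcall. */
		asm volatile("vmcall"
			     : "=a"(ret)
			     : "a"(nr), "D"(a0), "S"(a1)
			     : "memory");
		return ret;
	}

Both exit handlers advance the guest RIP by 3 before dispatching, matching the 3-byte encodings of vmcall/vmmcall, and return kvm_hypercall()'s value of 1 so the VCPU resumes execution.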
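
The switch in kvm_hypercall() is deliberately empty for now, so every hypercall number falls through to the default case and the guest sees -KVM_EINVAL in RAX. A sketch of how a future hypercall might be dispatched there; KVM_HC_EXAMPLE and kvm_hc_example() are hypothetical names, not defined by this patch:

	/* Hypothetical future dispatch inside kvm_hypercall();
	 * KVM_HC_EXAMPLE and kvm_hc_example() are illustrative only. */
	switch (nr) {
	case KVM_HC_EXAMPLE:
		ret = kvm_hc_example(vcpu, a0, a1);
		break;
	default:
		ret = -KVM_EINVAL;
		break;
	}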