arch/x86/include/asm/kvm_emulate.h  +16 −18

@@ -92,8 +92,9 @@ struct x86_emulate_ops {
 	 * @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
 	 * @bytes: [IN ] Number of bytes to read from memory.
 	 */
-	int (*read_std)(unsigned long addr, void *val,
-			unsigned int bytes, struct kvm_vcpu *vcpu,
+	int (*read_std)(struct x86_emulate_ctxt *ctxt,
+			unsigned long addr, void *val,
+			unsigned int bytes,
 			struct x86_exception *fault);
 
 	/*
@@ -103,8 +104,8 @@ struct x86_emulate_ops {
 	 * @val:   [OUT] Value write to memory, zero-extended to 'u_long'.
 	 * @bytes: [IN ] Number of bytes to write to memory.
 	 */
-	int (*write_std)(unsigned long addr, void *val,
-			 unsigned int bytes, struct kvm_vcpu *vcpu,
+	int (*write_std)(struct x86_emulate_ctxt *ctxt,
+			 unsigned long addr, void *val, unsigned int bytes,
 			 struct x86_exception *fault);
 	/*
 	 * fetch: Read bytes of standard (non-emulated/special) memory.
@@ -113,8 +114,8 @@ struct x86_emulate_ops {
 	 * @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
 	 * @bytes: [IN ] Number of bytes to read from memory.
 	 */
-	int (*fetch)(unsigned long addr, void *val,
-		     unsigned int bytes, struct kvm_vcpu *vcpu,
+	int (*fetch)(struct x86_emulate_ctxt *ctxt,
+		     unsigned long addr, void *val, unsigned int bytes,
 		     struct x86_exception *fault);
 
 	/*
@@ -123,11 +124,9 @@ struct x86_emulate_ops {
 	 * @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
 	 * @bytes: [IN ] Number of bytes to read from memory.
 	 */
-	int (*read_emulated)(unsigned long addr,
-			     void *val,
-			     unsigned int bytes,
-			     struct x86_exception *fault,
-			     struct kvm_vcpu *vcpu);
+	int (*read_emulated)(struct x86_emulate_ctxt *ctxt,
+			     unsigned long addr, void *val, unsigned int bytes,
+			     struct x86_exception *fault);
 
 	/*
 	 * write_emulated: Write bytes to emulated/special memory area.
@@ -136,11 +135,10 @@ struct x86_emulate_ops {
 	 *                required).
 	 * @bytes: [IN ] Number of bytes to write to memory.
 	 */
-	int (*write_emulated)(unsigned long addr,
-			      const void *val,
+	int (*write_emulated)(struct x86_emulate_ctxt *ctxt,
+			      unsigned long addr, const void *val,
 			      unsigned int bytes,
-			      struct x86_exception *fault,
-			      struct kvm_vcpu *vcpu);
+			      struct x86_exception *fault);
 
 	/*
 	 * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
@@ -150,12 +148,12 @@ struct x86_emulate_ops {
 	 * @new:   [IN ] Value to write to @addr.
 	 * @bytes: [IN ] Number of bytes to access using CMPXCHG.
 	 */
-	int (*cmpxchg_emulated)(unsigned long addr,
+	int (*cmpxchg_emulated)(struct x86_emulate_ctxt *ctxt,
+				unsigned long addr,
 				const void *old,
 				const void *new,
 				unsigned int bytes,
-				struct x86_exception *fault,
-				struct kvm_vcpu *vcpu);
+				struct x86_exception *fault);
 	int (*pio_in_emulated)(int size, unsigned short port, void *val,
 			       unsigned int count, struct kvm_vcpu *vcpu);
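Every memory hook in x86_emulate_ops now takes the emulation context as its first argument instead of carrying a struct kvm_vcpu * at the tail. A minimal before/after sketch of a call site, using the segmented_read_std hunk from the emulate.c diff below:

	/* Before: the emulator passed the KVM-specific vcpu through. */
	return ctxt->ops->read_std(linear, data, size, ctxt->vcpu,
				   &ctxt->exception);

	/* After: only the opaque emulation context crosses the boundary. */
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);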
arch/x86/kvm/emulate.c  +25 −29

@@ -645,8 +645,7 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 	rc = linearize(ctxt, addr, size, false, &linear);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	return ctxt->ops->read_std(linear, data, size, ctxt->vcpu,
-				   &ctxt->exception);
+	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
 }
 
 static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
@@ -665,8 +664,8 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
 		rc = __linearize(ctxt, addr, size, false, true, &linear);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
-		rc = ops->fetch(linear, fc->data + cur_size,
-				size, ctxt->vcpu, &ctxt->exception);
+		rc = ops->fetch(ctxt, linear, fc->data + cur_size,
+				size, &ctxt->exception);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
 		fc->end += size;
@@ -1047,8 +1046,8 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt,
 		if (mc->pos < mc->end)
 			goto read_cached;
 
-		rc = ops->read_emulated(addr, mc->data + mc->end, n,
-					&ctxt->exception, ctxt->vcpu);
+		rc = ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
+					&ctxt->exception);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
 		mc->end += n;
@@ -1087,8 +1086,8 @@ static int segmented_write(struct x86_emulate_ctxt *ctxt,
 	rc = linearize(ctxt, addr, size, true, &linear);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	return ctxt->ops->write_emulated(linear, data, size,
-					 &ctxt->exception, ctxt->vcpu);
+	return ctxt->ops->write_emulated(ctxt, linear, data, size,
+					 &ctxt->exception);
 }
 
 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
@@ -1102,8 +1101,8 @@ static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
 	rc = linearize(ctxt, addr, size, true, &linear);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	return ctxt->ops->cmpxchg_emulated(linear, orig_data, data, size,
-					   &ctxt->exception, ctxt->vcpu);
+	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
+					   size, &ctxt->exception);
 }
 
 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
@@ -1168,8 +1167,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	if (dt.size < index * 8 + 7)
 		return emulate_gp(ctxt, selector & 0xfffc);
 	addr = dt.address + index * 8;
-	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu,
-			    &ctxt->exception);
+	ret = ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception);
 
 	return ret;
 }
@@ -1190,8 +1188,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 		return emulate_gp(ctxt, selector & 0xfffc);
 
 	addr = dt.address + index * 8;
-	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu,
-			     &ctxt->exception);
+	ret = ops->write_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception);
 
 	return ret;
 }
@@ -1545,11 +1542,11 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt,
 	eip_addr = dt.address + (irq << 2);
 	cs_addr = dt.address + (irq << 2) + 2;
 
-	rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &ctxt->exception);
+	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &ctxt->exception);
+	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
@@ -2036,13 +2033,12 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
 #ifdef CONFIG_X86_64
 	base |= ((u64)base3) << 32;
 #endif
-	r = ops->read_std(base + 102, &io_bitmap_ptr, 2, ctxt->vcpu, NULL);
+	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
 	if (r != X86EMUL_CONTINUE)
 		return false;
 	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
 		return false;
-	r = ops->read_std(base + io_bitmap_ptr + port/8, &perm, 2, ctxt->vcpu,
-			  NULL);
+	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
 	if (r != X86EMUL_CONTINUE)
 		return false;
 	if ((perm >> bit_idx) & mask)
@@ -2150,7 +2146,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
 	int ret;
 	u32 new_tss_base = get_desc_base(new_desc);
 
-	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
 			    &ctxt->exception);
 	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
@@ -2158,13 +2154,13 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
 
 	save_state_to_tss16(ctxt, ops, &tss_seg);
 
-	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
 			     &ctxt->exception);
 	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
 		return ret;
 
-	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
 			    &ctxt->exception);
 	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
@@ -2173,10 +2169,10 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
 	if (old_tss_sel != 0xffff) {
 		tss_seg.prev_task_link = old_tss_sel;
 
-		ret = ops->write_std(new_tss_base,
+		ret = ops->write_std(ctxt, new_tss_base,
 				     &tss_seg.prev_task_link,
 				     sizeof tss_seg.prev_task_link,
-				     ctxt->vcpu, &ctxt->exception);
+				     &ctxt->exception);
 		if (ret != X86EMUL_CONTINUE)
 			/* FIXME: need to provide precise fault address */
 			return ret;
@@ -2282,7 +2278,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
 	int ret;
 	u32 new_tss_base = get_desc_base(new_desc);
 
-	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
 			    &ctxt->exception);
 	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
@@ -2290,13 +2286,13 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
 
 	save_state_to_tss32(ctxt, ops, &tss_seg);
 
-	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
 			     &ctxt->exception);
 	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
 		return ret;
 
-	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
 			    &ctxt->exception);
 	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
@@ -2305,10 +2301,10 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
 	if (old_tss_sel != 0xffff) {
 		tss_seg.prev_task_link = old_tss_sel;
 
-		ret = ops->write_std(new_tss_base,
+		ret = ops->write_std(ctxt, new_tss_base,
 				     &tss_seg.prev_task_link,
 				     sizeof tss_seg.prev_task_link,
-				     ctxt->vcpu, &ctxt->exception);
+				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
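With the vcpu parameter gone, nothing in emulate.c reaches KVM types through these hooks anymore, so the emulator could in principle be driven by any backend that fills in x86_emulate_ops. A hypothetical flat-memory test backend, sketched against the new signatures (test_ctxt, guest_ram, and test_read_std are invented for this illustration, not part of the patch):

	struct test_ctxt {
		struct x86_emulate_ctxt ctxt;	/* embedded; recovered via container_of() */
		u8 guest_ram[0x10000];		/* 64 KiB of fake guest memory */
	};

	static int test_read_std(struct x86_emulate_ctxt *ctxt, unsigned long addr,
				 void *val, unsigned int bytes,
				 struct x86_exception *fault)
	{
		struct test_ctxt *tc = container_of(ctxt, struct test_ctxt, ctxt);

		if (addr + bytes > sizeof(tc->guest_ram))
			return X86EMUL_PROPAGATE_FAULT;
		memcpy(val, tc->guest_ram + addr, bytes);
		return X86EMUL_CONTINUE;
	}

	static struct x86_emulate_ops test_ops = {
		.read_std = test_read_std,
		/* ... remaining hooks ... */
	};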
arch/x86/kvm/x86.c  +34 −20

@@ -63,6 +63,9 @@
 #define KVM_MAX_MCE_BANKS 32
 #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
 
+#define emul_to_vcpu(ctxt) \
+	container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
+
 /* EFER defaults:
  * - enable syscall per default because its emulated by KVM
  * - enable LME and LMA per default on 64 bit KVM
@@ -3760,37 +3763,43 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
 }
 
 /* used for instruction fetching */
-static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
-				struct kvm_vcpu *vcpu,
+static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
+				gva_t addr, void *val, unsigned int bytes,
 				struct x86_exception *exception)
 {
+	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+
 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
 					  access | PFERR_FETCH_MASK,
 					  exception);
 }
 
-static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
-			       struct kvm_vcpu *vcpu,
+static int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
+			       gva_t addr, void *val, unsigned int bytes,
			       struct x86_exception *exception)
 {
+	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+
 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
 					  exception);
 }
 
-static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
-				      struct kvm_vcpu *vcpu,
+static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+				      gva_t addr, void *val, unsigned int bytes,
 				      struct x86_exception *exception)
 {
+	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
 }
 
-static int kvm_write_guest_virt_system(gva_t addr, void *val,
+static int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+				       gva_t addr, void *val,
 				       unsigned int bytes,
-				       struct kvm_vcpu *vcpu,
 				       struct x86_exception *exception)
 {
+	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	void *data = val;
 	int r = X86EMUL_CONTINUE;
@@ -3818,12 +3827,13 @@ static int kvm_write_guest_virt_system(gva_t addr, void *val,
 	return r;
 }
 
-static int emulator_read_emulated(unsigned long addr,
+static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
+				  unsigned long addr,
 				  void *val,
 				  unsigned int bytes,
-				  struct x86_exception *exception,
-				  struct kvm_vcpu *vcpu)
+				  struct x86_exception *exception)
 {
+	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	gpa_t gpa;
 	int handled;
@@ -3844,7 +3854,7 @@ static int emulator_read_emulated(unsigned long addr,
 	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
 		goto mmio;
 
-	if (kvm_read_guest_virt(addr, val, bytes, vcpu, exception)
+	if (kvm_read_guest_virt(ctxt, addr, val, bytes, exception)
 	    == X86EMUL_CONTINUE)
 		return X86EMUL_CONTINUE;
@@ -3933,12 +3943,14 @@ static int emulator_write_emulated_onepage(unsigned long addr,
 	return X86EMUL_CONTINUE;
 }
 
-int emulator_write_emulated(unsigned long addr,
+int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
+			    unsigned long addr,
 			    const void *val,
 			    unsigned int bytes,
-			    struct x86_exception *exception,
-			    struct kvm_vcpu *vcpu)
+			    struct x86_exception *exception)
 {
+	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
 	/* Crossing a page boundary? */
 	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
 		int rc, now;
@@ -3966,13 +3978,14 @@ int emulator_write_emulated(unsigned long addr,
 	(cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
 #endif
 
-static int emulator_cmpxchg_emulated(unsigned long addr,
+static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
+				     unsigned long addr,
 				     const void *old,
 				     const void *new,
 				     unsigned int bytes,
-				     struct x86_exception *exception,
-				     struct kvm_vcpu *vcpu)
+				     struct x86_exception *exception)
 {
+	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	gpa_t gpa;
 	struct page *page;
 	char *kaddr;
@@ -4028,7 +4041,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
 emul_write:
 	printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
 
-	return emulator_write_emulated(addr, new, bytes, exception, vcpu);
+	return emulator_write_emulated(ctxt, addr, new, bytes, exception);
 }
 
 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
@@ -5009,7 +5022,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
 
 	kvm_x86_ops->patch_hypercall(vcpu, instruction);
 
-	return emulator_write_emulated(rip, instruction, 3, NULL, vcpu);
+	return emulator_write_emulated(&vcpu->arch.emulate_ctxt,
+				       rip, instruction, 3, NULL);
 }
 
 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
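The new emul_to_vcpu() macro is what makes dropping the vcpu argument safe: emulate_ctxt is embedded in struct kvm_vcpu (under vcpu->arch), so container_of() recovers the enclosing vcpu by pointer arithmetic alone. A self-contained sketch of the mechanism, with the struct layouts abbreviated (the real definitions live in arch/x86/include/asm/kvm_host.h, and the kernel's container_of() in include/linux/kernel.h adds a type check this simplified form omits):

	#include <stddef.h>

	/* Simplified container_of(): subtract the member's offset to get
	 * from a pointer to the member back to its enclosing structure. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct x86_emulate_ctxt { int mode; /* ...emulator state... */ };

	struct kvm_vcpu {
		/* ...generic vcpu state... */
		struct kvm_vcpu_arch {
			struct x86_emulate_ctxt emulate_ctxt;
			/* ...arch-specific state... */
		} arch;
	};

	#define emul_to_vcpu(ctxt) \
		container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)

The one converted caller that starts from a vcpu rather than a ctxt, kvm_fix_hypercall(), simply goes the other way and passes &vcpu->arch.emulate_ctxt.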