Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bcc55cba authored by Avi Kivity
Browse files

KVM: x86 emulator: make emulator memory callbacks return full exception



This way, they can return #GP, not just #PF.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent da9cb575
Loading
Loading
Loading
Loading
+9 −6
Original line number Diff line number Diff line
@@ -70,7 +70,8 @@ struct x86_emulate_ops {
	 *  @bytes: [IN ] Number of bytes to read from memory.
	 */
	int (*read_std)(unsigned long addr, void *val,
			unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
			unsigned int bytes, struct kvm_vcpu *vcpu,
			struct x86_exception *fault);

	/*
	 * write_std: Write bytes of standard (non-emulated/special) memory.
@@ -80,7 +81,8 @@ struct x86_emulate_ops {
	 *  @bytes: [IN ] Number of bytes to write to memory.
	 */
	int (*write_std)(unsigned long addr, void *val,
			 unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
			 unsigned int bytes, struct kvm_vcpu *vcpu,
			 struct x86_exception *fault);
	/*
	 * fetch: Read bytes of standard (non-emulated/special) memory.
	 *        Used for instruction fetch.
@@ -89,7 +91,8 @@ struct x86_emulate_ops {
	 *  @bytes: [IN ] Number of bytes to read from memory.
	 */
	int (*fetch)(unsigned long addr, void *val,
			unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
		     unsigned int bytes, struct kvm_vcpu *vcpu,
		     struct x86_exception *fault);

	/*
	 * read_emulated: Read bytes from emulated/special memory area.
@@ -100,7 +103,7 @@ struct x86_emulate_ops {
	int (*read_emulated)(unsigned long addr,
			     void *val,
			     unsigned int bytes,
			     unsigned int *error,
			     struct x86_exception *fault,
			     struct kvm_vcpu *vcpu);

	/*
@@ -113,7 +116,7 @@ struct x86_emulate_ops {
	int (*write_emulated)(unsigned long addr,
			      const void *val,
			      unsigned int bytes,
			      unsigned int *error,
			      struct x86_exception *fault,
			      struct kvm_vcpu *vcpu);

	/*
@@ -128,7 +131,7 @@ struct x86_emulate_ops {
				const void *old,
				const void *new,
				unsigned int bytes,
				unsigned int *error,
				struct x86_exception *fault,
				struct kvm_vcpu *vcpu);

	int (*pio_in_emulated)(int size, unsigned short port, void *val,
+31 −58
Original line number Diff line number Diff line
@@ -512,7 +512,7 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
		rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
				size, ctxt->vcpu, NULL);
				size, ctxt->vcpu, &ctxt->exception);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		fc->end += size;
@@ -565,12 +565,12 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt,
		op_bytes = 3;
	*address = 0;
	rc = ops->read_std(linear(ctxt, addr), (unsigned long *)size, 2,
			   ctxt->vcpu, NULL);
			   ctxt->vcpu, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = ops->read_std(linear(ctxt, addr), address, op_bytes,
			   ctxt->vcpu, NULL);
			   ctxt->vcpu, &ctxt->exception);
	return rc;
}

@@ -816,7 +816,6 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt,
{
	int rc;
	struct read_cache *mc = &ctxt->decode.mem_read;
	u32 err;

	while (size) {
		int n = min(size, 8u);
@@ -824,10 +823,8 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt,
		if (mc->pos < mc->end)
			goto read_cached;

		rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
					ctxt->vcpu);
		if (rc == X86EMUL_PROPAGATE_FAULT)
			emulate_pf(ctxt);
		rc = ops->read_emulated(addr, mc->data + mc->end, n,
					&ctxt->exception, ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		mc->end += n;
@@ -902,7 +899,6 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
	struct desc_ptr dt;
	u16 index = selector >> 3;
	int ret;
	u32 err;
	ulong addr;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);
@@ -912,9 +908,8 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
		return X86EMUL_PROPAGATE_FAULT;
	}
	addr = dt.address + index * 8;
	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu,  &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		emulate_pf(ctxt);
	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu,
			    &ctxt->exception);

       return ret;
}
@@ -926,7 +921,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	u32 err;
	ulong addr;
	int ret;

@@ -938,9 +932,8 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
	}

	addr = dt.address + index * 8;
	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		emulate_pf(ctxt);
	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu,
			     &ctxt->exception);

	return ret;
}
@@ -1087,7 +1080,6 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
{
	int rc;
	struct decode_cache *c = &ctxt->decode;
	u32 err;

	switch (c->dst.type) {
	case OP_REG:
@@ -1100,17 +1092,15 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
					&c->dst.orig_val,
					&c->dst.val,
					c->dst.bytes,
					&err,
					&ctxt->exception,
					ctxt->vcpu);
		else
			rc = ops->write_emulated(
					linear(ctxt, c->dst.addr.mem),
					&c->dst.val,
					c->dst.bytes,
					&err,
					&ctxt->exception,
					ctxt->vcpu);
		if (rc == X86EMUL_PROPAGATE_FAULT)
			emulate_pf(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
@@ -1283,7 +1273,6 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt,
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;
	u32 err;

	/* TODO: Add limit checks */
	c->src.val = ctxt->eflags;
@@ -1313,11 +1302,11 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt,
	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &err);
	rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &err);
	rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

@@ -1930,33 +1919,27 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
			    &ctxt->exception);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt);
		return ret;
	}

	save_state_to_tss16(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
			     &ctxt->exception);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
			    &ctxt->exception);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;
@@ -1964,13 +1947,11 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
				     ctxt->vcpu, &ctxt->exception);
		if (ret == X86EMUL_PROPAGATE_FAULT)
			/* FIXME: need to provide precise fault address */
			emulate_pf(ctxt);
			return ret;
	}
	}

	return load_state_from_tss16(ctxt, ops, &tss_seg);
}
@@ -2072,33 +2053,27 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
			    &ctxt->exception);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt);
		return ret;
	}

	save_state_to_tss32(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
			     &ctxt->exception);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
			    &ctxt->exception);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;
@@ -2106,13 +2081,11 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
				     ctxt->vcpu, &ctxt->exception);
		if (ret == X86EMUL_PROPAGATE_FAULT)
			/* FIXME: need to provide precise fault address */
			emulate_pf(ctxt);
			return ret;
	}
	}

	return load_state_from_tss32(ctxt, ops, &tss_seg);
}
+44 −32
Original line number Diff line number Diff line
@@ -3642,24 +3642,31 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, error);
}

/*
 * Populate @fault as a page-fault (#PF) exception carrying the hardware
 * error code @err, and return the emulator's fault-propagation status so
 * callers can hand the result straight back up the call chain.
 */
static int make_page_fault(struct x86_exception *fault, u32 err)
{
	fault->error_code_valid = true;
	fault->error_code = err;
	fault->vector = PF_VECTOR;
	return X86EMUL_PROPAGATE_FAULT;
}

static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
				      struct kvm_vcpu *vcpu, u32 access,
				      u32 *error)
				      struct x86_exception *exception)
{
	void *data = val;
	int r = X86EMUL_CONTINUE;
	u32 error;

	while (bytes) {
		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
							    error);
							    &error);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA) {
			r = X86EMUL_PROPAGATE_FAULT;
			goto out;
		}
		if (gpa == UNMAPPED_GVA)
			return make_page_fault(exception, error);
		ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
		if (ret < 0) {
			r = X86EMUL_IO_NEEDED;
@@ -3676,47 +3683,50 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,

/* used for instruction fetching */
static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
				struct kvm_vcpu *vcpu, u32 *error)
				struct kvm_vcpu *vcpu,
				struct x86_exception *exception)
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
					  access | PFERR_FETCH_MASK, error);
					  access | PFERR_FETCH_MASK,
					  exception);
}

static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
			       struct kvm_vcpu *vcpu, u32 *error)
			       struct kvm_vcpu *vcpu,
			       struct x86_exception *exception)
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
					  error);
					  exception);
}

static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
			       struct kvm_vcpu *vcpu, u32 *error)
				      struct kvm_vcpu *vcpu,
				      struct x86_exception *exception)
{
	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
}

static int kvm_write_guest_virt_system(gva_t addr, void *val,
				       unsigned int bytes,
				       struct kvm_vcpu *vcpu,
				       u32 *error)
				       struct x86_exception *exception)
{
	void *data = val;
	int r = X86EMUL_CONTINUE;
	u32 error;

	while (bytes) {
		gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
							     PFERR_WRITE_MASK,
							     error);
							     &error);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA) {
			r = X86EMUL_PROPAGATE_FAULT;
			goto out;
		}
		if (gpa == UNMAPPED_GVA)
			return make_page_fault(exception, error);
		ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
		if (ret < 0) {
			r = X86EMUL_IO_NEEDED;
@@ -3734,10 +3744,11 @@ static int kvm_write_guest_virt_system(gva_t addr, void *val,
static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  unsigned int *error_code,
				  struct x86_exception *exception,
				  struct kvm_vcpu *vcpu)
{
	gpa_t                 gpa;
	u32 error_code;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
@@ -3747,16 +3758,16 @@ static int emulator_read_emulated(unsigned long addr,
		return X86EMUL_CONTINUE;
	}

	gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, error_code);
	gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error_code);

	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;
		return make_page_fault(exception, error_code);

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
	if (kvm_read_guest_virt(addr, val, bytes, vcpu, exception)
	    == X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;

@@ -3795,15 +3806,16 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
static int emulator_write_emulated_onepage(unsigned long addr,
					   const void *val,
					   unsigned int bytes,
					   unsigned int *error_code,
					   struct x86_exception *exception,
					   struct kvm_vcpu *vcpu)
{
	gpa_t                 gpa;
	u32 error_code;

	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error_code);
	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, &error_code);

	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;
		return make_page_fault(exception, error_code);

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -3833,7 +3845,7 @@ static int emulator_write_emulated_onepage(unsigned long addr,
int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    unsigned int *error_code,
			    struct x86_exception *exception,
			    struct kvm_vcpu *vcpu)
{
	/* Crossing a page boundary? */
@@ -3841,7 +3853,7 @@ int emulator_write_emulated(unsigned long addr,
		int rc, now;

		now = -addr & ~PAGE_MASK;
		rc = emulator_write_emulated_onepage(addr, val, now, error_code,
		rc = emulator_write_emulated_onepage(addr, val, now, exception,
						     vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
@@ -3849,7 +3861,7 @@ int emulator_write_emulated(unsigned long addr,
		val += now;
		bytes -= now;
	}
	return emulator_write_emulated_onepage(addr, val, bytes, error_code,
	return emulator_write_emulated_onepage(addr, val, bytes, exception,
					       vcpu);
}

@@ -3867,7 +3879,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     unsigned int *error_code,
				     struct x86_exception *exception,
				     struct kvm_vcpu *vcpu)
{
	gpa_t gpa;
@@ -3925,7 +3937,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
emul_write:
	printk_once(KERN_WARNING "kvm: emulating exchange as write\n");

	return emulator_write_emulated(addr, new, bytes, error_code, vcpu);
	return emulator_write_emulated(addr, new, bytes, exception, vcpu);
}

static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)