Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0f12244f authored by Gleb Natapov, committed by Avi Kivity
Browse files

KVM: x86 emulator: make set_cr() callback return error if it fails



Make set_cr() callback return error if it fails instead of injecting #GP
behind emulator's back.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 79168fd1
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -135,7 +135,7 @@ struct x86_emulate_ops {
	unsigned long (*get_cached_segment_base)(int seg, struct kvm_vcpu *vcpu);
	unsigned long (*get_cached_segment_base)(int seg, struct kvm_vcpu *vcpu);
	void (*get_gdt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu);
	void (*get_gdt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu);
	ulong (*get_cr)(int cr, struct kvm_vcpu *vcpu);
	ulong (*get_cr)(int cr, struct kvm_vcpu *vcpu);
	void (*set_cr)(int cr, ulong val, struct kvm_vcpu *vcpu);
	int (*set_cr)(int cr, ulong val, struct kvm_vcpu *vcpu);
	int (*cpl)(struct kvm_vcpu *vcpu);
	int (*cpl)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
	int (*get_dr)(int dr, unsigned long *dest, struct kvm_vcpu *vcpu);
	int (*get_dr)(int dr, unsigned long *dest, struct kvm_vcpu *vcpu);
+8 −2
Original line number Original line Diff line number Diff line
@@ -2272,7 +2272,10 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
	struct decode_cache *c = &ctxt->decode;
	struct decode_cache *c = &ctxt->decode;
	int ret;
	int ret;


	ops->set_cr(3, tss->cr3, ctxt->vcpu);
	if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
		kvm_inject_gp(ctxt->vcpu, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}
	c->eip = tss->eip;
	c->eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;
	ctxt->eflags = tss->eflags | 2;
	c->regs[VCPU_REGS_RAX] = tss->eax;
	c->regs[VCPU_REGS_RAX] = tss->eax;
@@ -3135,7 +3138,10 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
		c->dst.type = OP_NONE;	/* no writeback */
		c->dst.type = OP_NONE;	/* no writeback */
		break;
		break;
	case 0x22: /* mov reg, cr */
	case 0x22: /* mov reg, cr */
		ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu);
		if (ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		c->dst.type = OP_NONE;
		c->dst.type = OP_NONE;
		break;
		break;
	case 0x23: /* mov from reg to dr */
	case 0x23: /* mov from reg to dr */
+75 −73
Original line number Original line Diff line number Diff line
@@ -414,57 +414,49 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
	return changed;
	return changed;
}
}


void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
static int __kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
{
	cr0 |= X86_CR0_ET;
	cr0 |= X86_CR0_ET;


#ifdef CONFIG_X86_64
#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL) {
	if (cr0 & 0xffffffff00000000UL)
		kvm_inject_gp(vcpu, 0);
		return 1;
		return;
	}
#endif
#endif


	cr0 &= ~CR0_RESERVED_BITS;
	cr0 &= ~CR0_RESERVED_BITS;


	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		kvm_inject_gp(vcpu, 0);
		return 1;
		return;
	}


	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		kvm_inject_gp(vcpu, 0);
		return 1;
		return;
	}


	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
#ifdef CONFIG_X86_64
		if ((vcpu->arch.efer & EFER_LME)) {
		if ((vcpu->arch.efer & EFER_LME)) {
			int cs_db, cs_l;
			int cs_db, cs_l;


			if (!is_pae(vcpu)) {
			if (!is_pae(vcpu))
				kvm_inject_gp(vcpu, 0);
				return 1;
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
			if (cs_l)
				kvm_inject_gp(vcpu, 0);
				return 1;
				return;

			}
		} else
		} else
#endif
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3))
			kvm_inject_gp(vcpu, 0);
			return 1;
			return;
		}

	}
	}


	kvm_x86_ops->set_cr0(vcpu, cr0);
	kvm_x86_ops->set_cr0(vcpu, cr0);


	kvm_mmu_reset_context(vcpu);
	kvm_mmu_reset_context(vcpu);
	return;
	return 0;
}

void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (__kvm_set_cr0(vcpu, cr0))
		kvm_inject_gp(vcpu, 0);
}
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);
EXPORT_SYMBOL_GPL(kvm_set_cr0);


@@ -474,61 +466,56 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
}
}
EXPORT_SYMBOL_GPL(kvm_lmsw);
EXPORT_SYMBOL_GPL(kvm_lmsw);


void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
int __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);
	unsigned long old_cr4 = kvm_read_cr4(vcpu);
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;


	if (cr4 & CR4_RESERVED_BITS) {
	if (cr4 & CR4_RESERVED_BITS)
		kvm_inject_gp(vcpu, 0);
		return 1;
		return;
	}


	if (is_long_mode(vcpu)) {
	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
		if (!(cr4 & X86_CR4_PAE))
			kvm_inject_gp(vcpu, 0);
			return 1;
			return;
		}
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
		   && !load_pdptrs(vcpu, vcpu->arch.cr3))
		kvm_inject_gp(vcpu, 0);
		return 1;
		return;

	}
	if (cr4 & X86_CR4_VMXE)
		return 1;


	if (cr4 & X86_CR4_VMXE) {
		kvm_inject_gp(vcpu, 0);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->arch.cr4 = cr4;
	vcpu->arch.cr4 = cr4;
	kvm_mmu_reset_context(vcpu);
	kvm_mmu_reset_context(vcpu);

	return 0;
}

void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (__kvm_set_cr4(vcpu, cr4))
		kvm_inject_gp(vcpu, 0);
}
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);
EXPORT_SYMBOL_GPL(kvm_set_cr4);


void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
static int __kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
{
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		return;
		return 0;
	}
	}


	if (is_long_mode(vcpu)) {
	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS)
			kvm_inject_gp(vcpu, 0);
			return 1;
			return;
		}
	} else {
	} else {
		if (is_pae(vcpu)) {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
			if (cr3 & CR3_PAE_RESERVED_BITS)
				kvm_inject_gp(vcpu, 0);
				return 1;
				return;
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3))
			}
				return 1;
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				kvm_inject_gp(vcpu, 0);
				return;
			}
		}
		}
		/*
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * We don't check reserved bits in nonpae mode, because
@@ -546,24 +533,34 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
	 * to debug) behavior on the guest side.
	 * to debug) behavior on the guest side.
	 */
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		kvm_inject_gp(vcpu, 0);
		return 1;
	else {
	vcpu->arch.cr3 = cr3;
	vcpu->arch.cr3 = cr3;
	vcpu->arch.mmu.new_cr3(vcpu);
	vcpu->arch.mmu.new_cr3(vcpu);
	return 0;
}
}
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);


void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
{
	if (cr8 & CR8_RESERVED_BITS) {
	if (__kvm_set_cr3(vcpu, cr3))
		kvm_inject_gp(vcpu, 0);
		kvm_inject_gp(vcpu, 0);
		return;
}
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS)
		return 1;
	if (irqchip_in_kernel(vcpu->kvm))
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
		kvm_lapic_set_tpr(vcpu, cr8);
	else
	else
		vcpu->arch.cr8 = cr8;
		vcpu->arch.cr8 = cr8;
	return 0;
}

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (__kvm_set_cr8(vcpu, cr8))
		kvm_inject_gp(vcpu, 0);
}
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);
EXPORT_SYMBOL_GPL(kvm_set_cr8);


@@ -3681,27 +3678,32 @@ static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu)
	return value;
	return value;
}
}


static void emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
{
{
	int res = 0;

	switch (cr) {
	switch (cr) {
	case 0:
	case 0:
		kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
		res = __kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
		break;
		break;
	case 2:
	case 2:
		vcpu->arch.cr2 = val;
		vcpu->arch.cr2 = val;
		break;
		break;
	case 3:
	case 3:
		kvm_set_cr3(vcpu, val);
		res = __kvm_set_cr3(vcpu, val);
		break;
		break;
	case 4:
	case 4:
		kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
		res = __kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
		break;
		break;
	case 8:
	case 8:
		kvm_set_cr8(vcpu, val & 0xfUL);
		res = __kvm_set_cr8(vcpu, val & 0xfUL);
		break;
		break;
	default:
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
		res = -1;
	}
	}

	return res;
}
}


static int emulator_get_cpl(struct kvm_vcpu *vcpu)
static int emulator_get_cpl(struct kvm_vcpu *vcpu)