Commit c8639010 authored by Jan Kiszka, committed by Avi Kivity

KVM: x86: Fix guest debug across vcpu INIT reset



So far, resetting a vcpu on INIT overwrote the dr7 value provided via
KVM_SET_GUEST_DEBUG and unconditionally cleared switch_db_regs.

Fix this by saving the dr7 used for guest debugging and recalculating
the effective register value, as well as switch_db_regs, on any
potential change. This shifts the focus of the set_guest_debug vendor
op to update_db_bp_intercept.

Found while trying to stop on start_secondary.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 7a84428a
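
The heart of the fix is a single helper in arch/x86/kvm/x86.c (added in the x86.c hunk below) through which every DR7 update now flows. Its logic is sketched here with explanatory comments; the comments are added for orientation and are not part of the commit:

	static void kvm_update_dr7(struct kvm_vcpu *vcpu)
	{
		unsigned long dr7;

		/*
		 * While userspace debugs the guest with hardware breakpoints,
		 * the dr7 it supplied (saved as guest_debug_dr7) takes
		 * precedence over the guest's own dr7.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
			dr7 = vcpu->arch.guest_debug_dr7;
		else
			dr7 = vcpu->arch.dr7;

		/*
		 * Program the effective value and derive switch_db_regs from
		 * its breakpoint-enable bits.
		 */
		kvm_x86_ops->set_dr7(vcpu, dr7);
		vcpu->arch.switch_db_regs = (dr7 & DR7_BP_EN_MASK);
	}

INIT reset (kvm_arch_vcpu_reset), KVM_SET_GUEST_DEBUG (kvm_arch_vcpu_ioctl_set_guest_debug), and guest writes to DR7 (__kvm_set_dr) all call this helper, so none of them can clobber the state set up by the others.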
arch/x86/include/asm/kvm_host.h  +2 −2
@@ -471,6 +471,7 @@ struct kvm_vcpu_arch {
 	unsigned long dr6;
 	unsigned long dr7;
 	unsigned long eff_db[KVM_NR_DB_REGS];
+	unsigned long guest_debug_dr7;
 
 	u64 mcg_cap;
 	u64 mcg_status;
@@ -647,8 +648,7 @@ struct kvm_x86_ops {
 	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
 	void (*vcpu_put)(struct kvm_vcpu *vcpu);
 
-	void (*set_guest_debug)(struct kvm_vcpu *vcpu,
-				struct kvm_guest_debug *dbg);
+	void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu);
 	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
 	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
 	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
arch/x86/kvm/svm.c  +4 −19
@@ -1146,7 +1146,6 @@ static void init_vmcb(struct vcpu_svm *svm)
 
 	svm_set_efer(&svm->vcpu, 0);
 	save->dr6 = 0xffff0ff0;
-	save->dr7 = 0x400;
 	kvm_set_rflags(&svm->vcpu, 2);
 	save->rip = 0x0000fff0;
 	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
@@ -1643,7 +1642,7 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
 	mark_dirty(svm->vmcb, VMCB_SEG);
 }
 
-static void update_db_intercept(struct kvm_vcpu *vcpu)
+static void update_db_bp_intercept(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
@@ -1663,20 +1662,6 @@ static void update_db_intercept(struct kvm_vcpu *vcpu)
 		vcpu->guest_debug = 0;
 }
 
-static void svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
-		svm->vmcb->save.dr7 = dbg->arch.debugreg[7];
-	else
-		svm->vmcb->save.dr7 = vcpu->arch.dr7;
-
-	mark_dirty(svm->vmcb, VMCB_DR);
-
-	update_db_intercept(vcpu);
-}
-
 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
 {
 	if (sd->next_asid > sd->max_asid) {
@@ -1748,7 +1733,7 @@ static int db_interception(struct vcpu_svm *svm)
 		if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
 			svm->vmcb->save.rflags &=
 				~(X86_EFLAGS_TF | X86_EFLAGS_RF);
-		update_db_intercept(&svm->vcpu);
+		update_db_bp_intercept(&svm->vcpu);
 	}
 
 	if (svm->vcpu.guest_debug &
@@ -3659,7 +3644,7 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
 	 */
 	svm->nmi_singlestep = true;
 	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
-	update_db_intercept(vcpu);
+	update_db_bp_intercept(vcpu);
 }
 
 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
@@ -4253,7 +4238,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.vcpu_load = svm_vcpu_load,
 	.vcpu_put = svm_vcpu_put,
 
-	.set_guest_debug = svm_guest_debug,
+	.update_db_bp_intercept = update_db_bp_intercept,
 	.get_msr = svm_get_msr,
 	.set_msr = svm_set_msr,
 	.get_segment_base = svm_get_segment_base,
arch/x86/kvm/vmx.c  +1 −13
@@ -2288,16 +2288,6 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 	}
 }
 
-static void set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
-{
-	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
-		vmcs_writel(GUEST_DR7, dbg->arch.debugreg[7]);
-	else
-		vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
-
-	update_exception_bitmap(vcpu);
-}
-
 static __init int cpu_has_kvm_support(void)
 {
 	return cpu_has_vmx();
@@ -3960,8 +3950,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		kvm_rip_write(vcpu, 0);
 	kvm_register_write(vcpu, VCPU_REGS_RSP, 0);
 
-	vmcs_writel(GUEST_DR7, 0x400);
-
 	vmcs_writel(GUEST_GDTR_BASE, 0);
 	vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
 
@@ -7237,7 +7225,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.vcpu_load = vmx_vcpu_load,
 	.vcpu_put = vmx_vcpu_put,
 
-	.set_guest_debug = set_guest_debug,
+	.update_db_bp_intercept = update_exception_bitmap,
 	.get_msr = vmx_get_msr,
 	.set_msr = vmx_set_msr,
 	.get_segment_base = vmx_get_segment_base,
arch/x86/kvm/x86.c  +17 −9
@@ -692,6 +692,18 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_get_cr8);
 
+static void kvm_update_dr7(struct kvm_vcpu *vcpu)
+{
+	unsigned long dr7;
+
+	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+		dr7 = vcpu->arch.guest_debug_dr7;
+	else
+		dr7 = vcpu->arch.dr7;
+	kvm_x86_ops->set_dr7(vcpu, dr7);
+	vcpu->arch.switch_db_regs = (dr7 & DR7_BP_EN_MASK);
+}
+
 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 {
 	switch (dr) {
@@ -717,10 +729,7 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 		if (val & 0xffffffff00000000ULL)
 			return -1; /* #GP */
 		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
-		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
-			kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
-			vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
-		}
+		kvm_update_dr7(vcpu);
 		break;
 	}
 
@@ -5851,13 +5860,12 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
 		for (i = 0; i < KVM_NR_DB_REGS; ++i)
 			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
-		vcpu->arch.switch_db_regs =
-			(dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
+		vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
 	} else {
 		for (i = 0; i < KVM_NR_DB_REGS; i++)
 			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
-		vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
 	}
+	kvm_update_dr7(vcpu);
 
 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
 		vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
@@ -5869,7 +5877,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	 */
 	kvm_set_rflags(vcpu, rflags);
 
-	kvm_x86_ops->set_guest_debug(vcpu, dbg);
+	kvm_x86_ops->update_db_bp_intercept(vcpu);
 
 	r = 0;
 
@@ -6045,10 +6053,10 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.nmi_pending = 0;
 	vcpu->arch.nmi_injected = false;
 
-	vcpu->arch.switch_db_regs = 0;
 	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
 	vcpu->arch.dr6 = DR6_FIXED_1;
 	vcpu->arch.dr7 = DR7_FIXED_1;
+	kvm_update_dr7(vcpu);
 
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	vcpu->arch.apf.msr_val = 0;