
Commit 02626b6a authored by Marcelo Tosatti, committed by Avi Kivity

KVM: x86: fix kvm_write_tsc() TSC matching thinko



kvm_write_tsc() converts from guest TSC to microseconds, not nanoseconds
as intended. The result is that the window for matching is 1000 seconds,
not 1 second.

Microsecond precision is enough for checking whether the TSC write delta
is within the heuristic values, so use it instead of nanoseconds.

Noted by Avi Kivity.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent b74f05d6
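
For context, the arithmetic behind the thinko: vcpu->arch.virtual_tsc_khz is cycles per millisecond, so (cycles * 1000) / khz yields microseconds. The old code compared that value against NSEC_PER_SEC, inflating the matching window by a factor of 1000. A minimal user-space sketch of the unit math (illustrative values, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical guest with a 2.5 GHz virtual TSC (2,500,000 kHz). */
	int64_t virtual_tsc_khz = 2500000;

	/* A TSC write delta worth exactly one virtual second of cycles. */
	int64_t delta_cycles = virtual_tsc_khz * 1000;

	/*
	 * kHz is cycles per millisecond, so cycles / kHz is milliseconds
	 * and (cycles * 1000) / kHz is MICROseconds, not nanoseconds.
	 */
	int64_t usdiff = (delta_cycles * 1000) / virtual_tsc_khz;

	printf("delta = %lld us\n", (long long)usdiff); /* prints 1000000 */

	/*
	 * Comparing this against NSEC_PER_SEC (1e9) matched deltas of up
	 * to 1e9 us = 1000 s; USEC_PER_SEC (1e6) restores the intended
	 * one-second window.
	 */
	return 0;
}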
+10 −9
@@ -1025,7 +1025,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	struct kvm *kvm = vcpu->kvm;
 	u64 offset, ns, elapsed;
 	unsigned long flags;
-	s64 nsdiff;
+	s64 usdiff;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
 	offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
@@ -1033,18 +1033,19 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	elapsed = ns - kvm->arch.last_tsc_nsec;
 
 	/* n.b - signed multiplication and division required */
-	nsdiff = data - kvm->arch.last_tsc_write;
+	usdiff = data - kvm->arch.last_tsc_write;
 #ifdef CONFIG_X86_64
-	nsdiff = (nsdiff * 1000) / vcpu->arch.virtual_tsc_khz;
+	usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
 #else
 	/* do_div() only does unsigned */
 	asm("idivl %2; xor %%edx, %%edx"
-	    : "=A"(nsdiff)
-	    : "A"(nsdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
+	    : "=A"(usdiff)
+	    : "A"(usdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
 #endif
-	nsdiff -= elapsed;
-	if (nsdiff < 0)
-		nsdiff = -nsdiff;
+	do_div(elapsed, 1000);
+	usdiff -= elapsed;
+	if (usdiff < 0)
+		usdiff = -usdiff;
 
 	/*
 	 * Special case: TSC write with a small delta (1 second) of virtual
@@ -1056,7 +1057,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	 * compensation code attempt to catch up if we fall behind, but
 	 * it's better to try to match offsets from the beginning.
          */
-	if (nsdiff < NSEC_PER_SEC &&
+	if (usdiff < USEC_PER_SEC &&
	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
 		if (!check_tsc_unstable()) {
 			offset = kvm->arch.cur_tsc_offset;
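
Two details of the new code are worth noting. First, elapsed is computed in nanoseconds (ns - kvm->arch.last_tsc_nsec), so the added do_div(elapsed, 1000) converts it to microseconds before the subtraction, keeping both operands in the same unit. Second, on 32-bit x86 the #else branch keeps the idivl inline asm because the kernel's do_div() only performs unsigned division, while the TSC write delta can be negative. A user-space model of that second point (hypothetical values, plain C division standing in for the asm):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t last_tsc_write = 5000000000LL;	/* hypothetical last write */
	int64_t data = 4000000000LL;		/* guest writes an older TSC */
	int32_t virtual_tsc_khz = 2500000;	/* 2.5 GHz virtual TSC */

	int64_t diff = data - last_tsc_write;	/* negative: -1e9 cycles */

	/* Signed division preserves the sign, so the later abs() works: */
	int64_t usdiff = (diff * 1000) / virtual_tsc_khz; /* -400000 us */

	/*
	 * An unsigned 64/32 divide (do_div-style) would instead treat
	 * diff * 1000 as a huge positive number and produce garbage.
	 */
	printf("usdiff = %lld us\n", (long long)usdiff);
	return 0;
}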