
Commit c5e8ec8e authored by Denis Plotnikov, committed by Radim Krčmář

KVM: x86: remaster kvm_write_tsc code



Reuse the existing nsec_to_cycles() helper instead of open-coded
inline asm, and make the TSC synchronization check more concise
and clear.

Signed-off-by: Denis Plotnikov <dplotnikov@virtuozzo.com>
Reviewed-by: Roman Kagan <rkagan@virtuozzo.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
parent 900ab14c
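
The diff below drops the old microsecond-difference computation (signed multiply/divide, with an inline-asm idivl fallback and exception-table fixup on 32-bit) and instead decides in TSC-cycle units whether a guest TSC write should be treated as a synchronization attempt. A minimal user-space sketch of that check follows; it is illustrative only, and nsec_to_cycles_approx() plus the plain integer parameters are stand-ins for the kernel's nsec_to_cycles() helper and the vcpu/kvm fields used in the real code.

#include <stdbool.h>
#include <stdint.h>

/* Rough stand-in for the kernel's nsec_to_cycles(): cycles = ns * kHz / 10^6. */
static uint64_t nsec_to_cycles_approx(uint64_t virtual_tsc_khz, uint64_t ns)
{
	return ns * virtual_tsc_khz / 1000000ULL;
}

/*
 * A TSC write that lands within +/- one second's worth of guest cycles of
 * the value the TSC is expected to have reached is treated as an attempt
 * to synchronize the CPU.
 */
static bool tsc_write_is_synchronizing(uint64_t data, uint64_t last_tsc_write,
				       uint64_t elapsed_ns,
				       uint64_t virtual_tsc_khz)
{
	uint64_t tsc_exp = last_tsc_write +
			   nsec_to_cycles_approx(virtual_tsc_khz, elapsed_ns);
	uint64_t tsc_hz = virtual_tsc_khz * 1000ULL;	/* cycles per second */

	return data < tsc_exp + tsc_hz && data + tsc_hz > tsc_exp;
}

Because the comparison stays in the unsigned cycle domain there is no signed division at all, so the CONFIG_X86_64 special case and the idivl exception-table fixup both go away.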
arch/x86/kvm/x86.c: +13 −39
@@ -1443,10 +1443,10 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	struct kvm *kvm = vcpu->kvm;
 	u64 offset, ns, elapsed;
 	unsigned long flags;
-	s64 usdiff;
 	bool matched;
 	bool already_matched;
 	u64 data = msr->data;
+	bool synchronizing = false;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
 	offset = kvm_compute_tsc_offset(vcpu, data);
@@ -1454,51 +1454,25 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	elapsed = ns - kvm->arch.last_tsc_nsec;
 
 	if (vcpu->arch.virtual_tsc_khz) {
-		int faulted = 0;
-
-		/* n.b - signed multiplication and division required */
-		usdiff = data - kvm->arch.last_tsc_write;
-#ifdef CONFIG_X86_64
-		usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
-#else
-		/* do_div() only does unsigned */
-		asm("1: idivl %[divisor]\n"
-		    "2: xor %%edx, %%edx\n"
-		    "   movl $0, %[faulted]\n"
-		    "3:\n"
-		    ".section .fixup,\"ax\"\n"
-		    "4: movl $1, %[faulted]\n"
-		    "   jmp  3b\n"
-		    ".previous\n"
-
-		_ASM_EXTABLE(1b, 4b)
-
-		: "=A"(usdiff), [faulted] "=r" (faulted)
-		: "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz));
-
-#endif
-		do_div(elapsed, 1000);
-		usdiff -= elapsed;
-		if (usdiff < 0)
-			usdiff = -usdiff;
-
-		/* idivl overflow => difference is larger than USEC_PER_SEC */
-		if (faulted)
-			usdiff = USEC_PER_SEC;
-	} else
-		usdiff = USEC_PER_SEC; /* disable TSC match window below */
+		u64 tsc_exp = kvm->arch.last_tsc_write +
+					nsec_to_cycles(vcpu, elapsed);
+		u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
+		/*
+		 * Special case: TSC write with a small delta (1 second)
+		 * of virtual cycle time against real time is
+		 * interpreted as an attempt to synchronize the CPU.
+		 */
+		synchronizing = data < tsc_exp + tsc_hz &&
+				data + tsc_hz > tsc_exp;
+	}
 
 	/*
-	 * Special case: TSC write with a small delta (1 second) of virtual
-	 * cycle time against real time is interpreted as an attempt to
-	 * synchronize the CPU.
-         *
 	 * For a reliable TSC, we can match TSC offsets, and for an unstable
 	 * TSC, we add elapsed time in this computation.  We could let the
 	 * compensation code attempt to catch up if we fall behind, but
 	 * it's better to try to match offsets from the beginning.
          */
-	if (usdiff < USEC_PER_SEC &&
+	if (synchronizing &&
 	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
 		if (!check_tsc_unstable()) {
 			offset = kvm->arch.cur_tsc_offset;