
Commit 019960ae authored by Avi Kivity

KVM: VMX: Don't adjust tsc offset forward



Most Intel hosts have a stable tsc, and playing with the offset only
reduces accuracy.  By limiting tsc offset adjustment only to forward updates,
we effectively disable tsc offset adjustment on these hosts.

Signed-off-by: Avi Kivity <avi@qumranet.com>
parent b8688d51
+6 −3
@@ -519,7 +519,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u64 phys_addr = __pa(vmx->vmcs);
-	u64 tsc_this, delta;
+	u64 tsc_this, delta, new_offset;
 
 	if (vcpu->cpu != cpu) {
 		vcpu_clear(vmx);
@@ -559,8 +559,11 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		/*
 		 * Make sure the time stamp counter is monotonous.
 		 */
 		rdtscll(tsc_this);
-		delta = vcpu->arch.host_tsc - tsc_this;
-		vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
+		if (tsc_this < vcpu->arch.host_tsc) {
+			delta = vcpu->arch.host_tsc - tsc_this;
+			new_offset = vmcs_read64(TSC_OFFSET) + delta;
+			vmcs_write64(TSC_OFFSET, new_offset);
+		}
 	}
 }
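
For illustration only, here is a minimal, self-contained sketch of the monotonicity rule the patch enforces: when a vCPU lands on a CPU whose TSC reads behind the value recorded when it last ran, the offset is bumped forward by the difference; otherwise it is left untouched. The names toy_vcpu, toy_vcpu_load, host_tsc and tsc_offset are hypothetical stand-ins, not KVM's API; the real logic is the vmx_vcpu_load() hunk above.

/*
 * Toy model of the "adjust only forward" rule.  Not KVM code: the struct,
 * function names, and the place where host_tsc is resampled are simplified
 * assumptions for the example.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_vcpu {
	uint64_t host_tsc;   /* host TSC sampled when the vCPU last ran */
	uint64_t tsc_offset; /* guest TSC = host TSC + tsc_offset */
};

/* Simulate loading the vCPU on a CPU whose TSC currently reads tsc_this. */
static void toy_vcpu_load(struct toy_vcpu *vcpu, uint64_t tsc_this)
{
	/*
	 * Compensate only when the new CPU's TSC is behind the recorded
	 * value, i.e. only ever move the offset forward.  This keeps the
	 * guest TSC monotonic; on a host with a stable, synchronized TSC
	 * the branch never fires and the offset is never perturbed.
	 */
	if (tsc_this < vcpu->host_tsc) {
		uint64_t delta = vcpu->host_tsc - tsc_this;
		vcpu->tsc_offset += delta;
	}
	/* Simplification: resample here; KVM records host_tsc on unload. */
	vcpu->host_tsc = tsc_this;
}

int main(void)
{
	struct toy_vcpu vcpu = { .host_tsc = 1000, .tsc_offset = 0 };

	/* New CPU is behind: offset is bumped forward to hide the jump. */
	toy_vcpu_load(&vcpu, 400);
	printf("offset after backward jump: %llu\n",
	       (unsigned long long)vcpu.tsc_offset);

	/* New CPU is ahead (the common stable-TSC case): offset untouched. */
	toy_vcpu_load(&vcpu, 2000);
	printf("offset after forward jump:  %llu\n",
	       (unsigned long long)vcpu.tsc_offset);
	return 0;
}

Under these assumptions the second call leaves tsc_offset unchanged, which is the "effectively disable tsc offset adjustment on these hosts" behaviour described in the commit message.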