
Commit 108b249c authored by Paolo Bonzini

KVM: x86: introduce get_kvmclock_ns



Introduce a function that reads the exact nanoseconds value that is
provided to the guest in kvmclock.  This crystallizes the notion of
kvmclock as a thin veneer over a stable TSC, that the guest will
(hopefully) convert with NTP.  In other words, kvmclock is *not* a
paravirtualized host-to-guest NTP.
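
Concretely, the guest converts its TSC to nanoseconds using nothing more
than the scale/shift parameters the host publishes in struct
pvclock_vcpu_time_info. A simplified sketch of that conversion
(illustrative only; the real code is the __pvclock_read_cycles() touched
below, and the kernel's pvclock_scale_delta() keeps the full intermediate
precision):

	u64 kvmclock_ns(const struct pvclock_vcpu_time_info *src, u64 tsc)
	{
		/* cycles elapsed since the host last updated the page */
		u64 delta = tsc - src->tsc_timestamp;

		/* pre-shift, then apply the 32.32 fixed-point cycles->ns factor */
		if (src->tsc_shift < 0)
			delta >>= -src->tsc_shift;
		else
			delta <<= src->tsc_shift;

		/* NB: the kernel uses a 128-bit intermediate to avoid overflow */
		return src->system_time + ((delta * src->tsc_to_system_mul) >> 32);
	}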

Drop the get_kernel_ns() function, which was used both to get the base
value of the master clock and to get the current value of kvmclock.
The former use is replaced by ktime_get_boot_ns(); the latter is
the purpose of the new get_kvmclock_ns().

This also allows KVM to provide a Hyper-V time reference counter that
is synchronized with the time that is computed from the TSC page.

Reviewed-by: Roman Kagan <rkagan@virtuozzo.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 67198ac3
arch/x86/entry/vdso/vclock_gettime.c (+1 −1)
@@ -129,7 +129,7 @@ static notrace cycle_t vread_pvclock(int *mode)
			return 0;
		}

-		ret = __pvclock_read_cycles(pvti);
+		ret = __pvclock_read_cycles(pvti, rdtsc_ordered());
	} while (pvclock_read_retry(pvti, version));

	/* refer to vread_tsc() comment for rationale */
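
Because this sits in the vDSO, a guest process gets kvmclock-backed time
without entering the kernel at all. A minimal userspace illustration
(ordinary C, not part of this commit): when the guest's clocksource is a
stable kvmclock, a call like this can be satisfied entirely by the
vread_pvclock() path above.

	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);	/* vDSO fast path in a KVM guest */
		printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
		return 0;
	}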
arch/x86/include/asm/pvclock.h (+3 −2)
@@ -87,9 +87,10 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
}

static __always_inline
-cycle_t __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src)
+cycle_t __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
+			      u64 tsc)
{
-	u64 delta = rdtsc_ordered() - src->tsc_timestamp;
+	u64 delta = tsc - src->tsc_timestamp;
	cycle_t offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
					     src->tsc_shift);
	return src->system_time + offset;
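
Factoring the TSC read out into a parameter is the enabling change here:
guest-side callers keep passing rdtsc_ordered(), while the new host-side
helper in x86.c below can instead pass the guest-scaled value from
kvm_read_l1_tsc(vcpu, rdtsc()), reusing the exact arithmetic the guest sees.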
arch/x86/kernel/pvclock.c (+1 −1)
@@ -80,7 +80,7 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)

	do {
		version = pvclock_read_begin(src);
-		ret = __pvclock_read_cycles(src);
+		ret = __pvclock_read_cycles(src, rdtsc_ordered());
		flags = src->flags;
	} while (pvclock_read_retry(src, version));

arch/x86/kvm/hyperv.c (+1 −1)
@@ -386,7 +386,7 @@ static void synic_init(struct kvm_vcpu_hv_synic *synic)

static u64 get_time_ref_counter(struct kvm *kvm)
{
-	return div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
+	return div_u64(get_kvmclock_ns(kvm), 100);
}

static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
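
The divisor is 100 because the Hyper-V specification defines the reference
counter in 100 ns units; 1 ms of kvmclock time (1,000,000 ns), for example,
reads as 10,000 reference-counter ticks. Computing it from get_kvmclock_ns()
rather than get_kernel_ns() + kvmclock_offset is what keeps it synchronized
with the time a guest derives from the Hyper-V TSC page, as the commit
message notes.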
arch/x86/kvm/x86.c (+36 −12)
@@ -1431,7 +1431,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)

	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
	offset = kvm_compute_tsc_offset(vcpu, data);
-	ns = get_kernel_ns();
+	ns = ktime_get_boot_ns();
	elapsed = ns - kvm->arch.last_tsc_nsec;

	if (vcpu->arch.virtual_tsc_khz) {
@@ -1722,6 +1722,34 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
#endif
}

+static u64 __get_kvmclock_ns(struct kvm *kvm)
+{
+	struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, 0);
+	struct kvm_arch *ka = &kvm->arch;
+	s64 ns;
+
+	if (vcpu->arch.hv_clock.flags & PVCLOCK_TSC_STABLE_BIT) {
+		u64 tsc = kvm_read_l1_tsc(vcpu, rdtsc());
+		ns = __pvclock_read_cycles(&vcpu->arch.hv_clock, tsc);
+	} else {
+		ns = ktime_get_boot_ns() + ka->kvmclock_offset;
+	}
+
+	return ns;
+}
+
+u64 get_kvmclock_ns(struct kvm *kvm)
+{
+	unsigned long flags;
+	s64 ns;
+
+	local_irq_save(flags);
+	ns = __get_kvmclock_ns(kvm);
+	local_irq_restore(flags);
+
+	return ns;
+}
+
static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
{
	struct kvm_vcpu_arch *vcpu = &v->arch;
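
When the master clock is stable (PVCLOCK_TSC_STABLE_BIT), the new helper
computes the value exactly as the guest would: it scales the current TSC
through vcpu 0's hv_clock parameters via __pvclock_read_cycles().
get_kvmclock_ns() disables interrupts so the TSC read and the parameters it
is scaled by form one consistent snapshot; the unlocked __get_kvmclock_ns()
exists for callers such as KVM_SET_CLOCK below that already run with
interrupts off.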
@@ -1811,7 +1839,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
	}
	if (!use_master_clock) {
		host_tsc = rdtsc();
-		kernel_ns = get_kernel_ns();
+		kernel_ns = ktime_get_boot_ns();
	}

	tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
@@ -4054,7 +4082,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
	case KVM_SET_CLOCK: {
		struct kvm_clock_data user_ns;
		u64 now_ns;
-		s64 delta;

		r = -EFAULT;
		if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
@@ -4066,10 +4093,9 @@ long kvm_arch_vm_ioctl(struct file *filp,

		r = 0;
		local_irq_disable();
-		now_ns = get_kernel_ns();
-		delta = user_ns.clock - now_ns;
+		now_ns = __get_kvmclock_ns(kvm);
+		kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
		local_irq_enable();
-		kvm->arch.kvmclock_offset = delta;
		kvm_gen_update_masterclock(kvm);
		break;
	}
@@ -4077,10 +4103,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
		struct kvm_clock_data user_ns;
		u64 now_ns;

-		local_irq_disable();
-		now_ns = get_kernel_ns();
-		user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
-		local_irq_enable();
+		now_ns = get_kvmclock_ns(kvm);
+		user_ns.clock = now_ns;
		user_ns.flags = 0;
		memset(&user_ns.pad, 0, sizeof(user_ns.pad));

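
With these two hunks, KVM_GET_CLOCK reports exactly what the guest reads
from kvmclock, and KVM_SET_CLOCK adjusts the offset relative to that same
value. A minimal userspace sketch of the ioctl pair (vm_fd is assumed to be
a KVM VM file descriptor obtained via KVM_CREATE_VM):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Read the guest-visible clock, then write it back advanced by 1 ms. */
	int bump_kvmclock(int vm_fd)
	{
		struct kvm_clock_data data = { 0 };

		if (ioctl(vm_fd, KVM_GET_CLOCK, &data) < 0)
			return -1;

		data.clock += 1000000;		/* value is in nanoseconds */
		data.flags = 0;

		return ioctl(vm_fd, KVM_SET_CLOCK, &data);
	}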
@@ -7544,7 +7568,7 @@ int kvm_arch_hardware_enable(void)
	 * before any KVM threads can be running.  Unfortunately, we can't
	 * bring the TSCs fully up to date with real time, as we aren't yet far
	 * enough into CPU bringup that we know how much real time has actually
-	 * elapsed; our helper function, get_kernel_ns() will be using boot
+	 * elapsed; our helper function, ktime_get_boot_ns() will be using boot
	 * variables that haven't been updated yet.
	 *
	 * So we simply find the maximum observed TSC above, then record the
@@ -7779,7 +7803,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
	mutex_init(&kvm->arch.apic_map_lock);
	spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);

-	kvm->arch.kvmclock_offset = -get_kernel_ns();
+	kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
	pvclock_update_vm_gtod_copy(kvm);

	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
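
Initializing the offset this way makes the guest clock start near zero at
VM creation: in the non-stable path above, kvmclock is
ktime_get_boot_ns() + kvmclock_offset, so an offset of -ktime_get_boot_ns()
taken at kvm_arch_init_vm() time cancels the host's accumulated boot time.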