
Commit 857e4099 authored by Joerg Roedel, committed by Avi Kivity

KVM: X86: Delegate tsc-offset calculation to architecture code



With TSC scaling in SVM the tsc-offset needs to be
calculated differently. This patch propagates this
calculation into the architecture-specific modules
so that this complexity can be handled there.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 4051b188
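
The point of the change: kvm_write_tsc() used to assume offset = target - native_read_tsc(), which stops being true once SVM scales the guest TSC. A minimal standalone sketch of the callback delegation the patch introduces is below; the names echo the patch, but read_host_tsc() and the integer ratio are illustrative stand-ins, not kernel code.

/*
 * Standalone model of the delegation pattern: common code calls
 * through an ops-struct hook instead of hard-coding the offset math.
 * Simplified illustration only -- read_host_tsc() is a stub and the
 * ratio is a plain integer, unlike the kernel's fixed-point format.
 */
#include <stdint.h>
#include <stdio.h>

struct vcpu { uint64_t tsc_ratio; };	/* stand-in for struct kvm_vcpu */

struct x86_ops {
	/* arch-specific: turn a target guest TSC value into an offset */
	uint64_t (*compute_tsc_offset)(struct vcpu *v, uint64_t target_tsc);
};

static uint64_t read_host_tsc(void)	/* stub for native_read_tsc() */
{
	return 1000000;
}

/* VMX-style: no hardware scaling, the offset is a plain difference */
static uint64_t vmx_compute(struct vcpu *v, uint64_t target_tsc)
{
	(void)v;
	return target_tsc - read_host_tsc();
}

/* SVM-style: scale the host TSC first, then subtract */
static uint64_t svm_compute(struct vcpu *v, uint64_t target_tsc)
{
	return target_tsc - read_host_tsc() * v->tsc_ratio;
}

int main(void)
{
	struct vcpu v = { .tsc_ratio = 2 };
	struct x86_ops svm_ops = { .compute_tsc_offset = svm_compute };
	struct x86_ops vmx_ops = { .compute_tsc_offset = vmx_compute };

	/* common code only ever calls the hook, as kvm_write_tsc() does */
	printf("svm offset: %llu\n",
	       (unsigned long long)svm_ops.compute_tsc_offset(&v, 5000000));
	printf("vmx offset: %llu\n",
	       (unsigned long long)vmx_ops.compute_tsc_offset(&v, 5000000));
	return 0;
}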
arch/x86/include/asm/kvm_host.h +2 −0
@@ -609,6 +609,8 @@ struct kvm_x86_ops {
 	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz);
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
+	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
+
 	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
 
 	int (*check_intercept)(struct kvm_vcpu *vcpu,
arch/x86/kvm/svm.c +10 −0
@@ -943,6 +943,15 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
 	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
+static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+{
+	u64 tsc;
+
+	tsc = svm_scale_tsc(vcpu, native_read_tsc());
+
+	return target_tsc - tsc;
+}
+
 static void init_vmcb(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
@@ -4194,6 +4203,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_tsc_khz = svm_set_tsc_khz,
 	.write_tsc_offset = svm_write_tsc_offset,
 	.adjust_tsc_offset = svm_adjust_tsc_offset,
+	.compute_tsc_offset = svm_compute_tsc_offset,
 
 	.set_tdp_cr3 = set_tdp_cr3,
 
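Why SVM needs its own implementation: with the TSC ratio active the guest observes guest_tsc = scale(host_tsc) + tsc_offset, so hitting a target value means subtracting the scaled reading rather than the raw one. A minimal sketch of that arithmetic, assuming a simplified 8.32 fixed-point ratio (the real svm_scale_tsc() performs its multiply differently):

#include <assert.h>
#include <stdint.h>

/* Scale a TSC value by an 8.32 fixed-point ratio (illustrative only).
 * The __uint128_t intermediate keeps the product from overflowing. */
static uint64_t scale_tsc(uint64_t tsc, uint64_t ratio)
{
	return (uint64_t)(((__uint128_t)tsc * ratio) >> 32);
}

/* guest_tsc = scale(host_tsc) + offset, solved for the offset */
static uint64_t compute_tsc_offset(uint64_t host_tsc, uint64_t ratio,
				   uint64_t target_tsc)
{
	return target_tsc - scale_tsc(host_tsc, ratio);
}

int main(void)
{
	uint64_t ratio = 2ULL << 32;	/* 2.0: guest clock runs twice as fast */

	/* host TSC reads 1,000,000 and scales to 2,000,000, so an offset
	 * of 3,000,000 lands the guest exactly on the 5,000,000 target */
	assert(compute_tsc_offset(1000000, ratio, 5000000) == 3000000);
	return 0;
}

With a ratio of 1.0 (1ULL << 32) this degenerates to target_tsc - host_tsc, which is exactly what the VMX path below computes: VMX had no hardware TSC scaling at this point, so its hook can subtract the raw reading.
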
arch/x86/kvm/vmx.c +6 −0
@@ -1184,6 +1184,11 @@ static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
 	vmcs_write64(TSC_OFFSET, offset + adjustment);
 }
 
+static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+{
+	return target_tsc - native_read_tsc();
+}
+
 /*
  * Reads an msr value (of 'msr_index') into 'pdata'.
  * Returns 0 on success, non-0 otherwise.
@@ -4510,6 +4515,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.set_tsc_khz = vmx_set_tsc_khz,
 	.write_tsc_offset = vmx_write_tsc_offset,
 	.adjust_tsc_offset = vmx_adjust_tsc_offset,
+	.compute_tsc_offset = vmx_compute_tsc_offset,
 
 	.set_tdp_cr3 = vmx_set_cr3,
 
arch/x86/kvm/x86.c +5 −5
@@ -977,7 +977,7 @@ static u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu)
 		return __this_cpu_read(cpu_tsc_khz);
 }
 
-static inline u64 nsec_to_cycles(u64 nsec)
+static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
 {
 	u64 ret;
 
@@ -985,7 +985,7 @@ static inline u64 nsec_to_cycles(u64 nsec)
 	if (kvm_tsc_changes_freq())
 		printk_once(KERN_WARNING
 		 "kvm: unreliable cycle conversion on adjustable rate TSC\n");
-	ret = nsec * __this_cpu_read(cpu_tsc_khz);
+	ret = nsec * vcpu_tsc_khz(vcpu);
 	do_div(ret, USEC_PER_SEC);
 	return ret;
 }
@@ -1015,7 +1015,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	s64 sdiff;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
-	offset = data - native_read_tsc();
+	offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
 	ns = get_kernel_ns();
 	elapsed = ns - kvm->arch.last_tsc_nsec;
 	sdiff = data - kvm->arch.last_tsc_write;
@@ -1031,13 +1031,13 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	 * In that case, for a reliable TSC, we can match TSC offsets,
 	 * or make a best guess using elapsed value.
 	 */
-	if (sdiff < nsec_to_cycles(5ULL * NSEC_PER_SEC) &&
+	if (sdiff < nsec_to_cycles(vcpu, 5ULL * NSEC_PER_SEC) &&
 	    elapsed < 5ULL * NSEC_PER_SEC) {
 		if (!check_tsc_unstable()) {
 			offset = kvm->arch.last_tsc_offset;
 			pr_debug("kvm: matched tsc offset for %llu\n", data);
 		} else {
-			u64 delta = nsec_to_cycles(elapsed);
+			u64 delta = nsec_to_cycles(vcpu, elapsed);
 			offset += delta;
 			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
 		}
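
The nsec_to_cycles() signature change matters because the 5-second match window has to be measured in guest cycles: a vcpu may use a TSC frequency different from the current host CPU's, so the conversion now goes through vcpu_tsc_khz() instead of the per-cpu value. A hedged sketch of the unit math (the kernel uses do_div() on a u64; the 128-bit intermediate here just makes the standalone example overflow-safe):

#include <assert.h>
#include <stdint.h>

#define USEC_PER_SEC 1000000ULL
#define NSEC_PER_SEC 1000000000ULL

/* cycles = ns * (khz * 1000 cycles/s) / (10^9 ns/s) = ns * khz / 10^6 */
static uint64_t nsec_to_cycles(uint64_t vcpu_khz, uint64_t nsec)
{
	return (uint64_t)(((__uint128_t)nsec * vcpu_khz) / USEC_PER_SEC);
}

int main(void)
{
	/* a 2 GHz guest clock over the 5-second match window */
	assert(nsec_to_cycles(2000000, 5 * NSEC_PER_SEC) == 10000000000ULL);
	return 0;
}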