
Commit 07c1419a authored by Haozhong Zhang, committed by Paolo Bonzini

KVM: x86: Replace call-back compute_tsc_offset() with a common function



Both VMX and SVM calculate the TSC offset in the same way, so this
patch removes the call-back compute_tsc_offset() and replaces it with a
common function kvm_compute_tsc_offset().

Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 381d585c
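
The unification is safe for VMX, whose removed variant did not scale the host
TSC, because kvm_scale_tsc() (exported at the top of the x86.c hunk below)
passes the TSC through unchanged at the default 1:1 scaling ratio; on hosts
without TSC scaling, the common helper therefore computes exactly the
target_tsc - rdtsc() that vmx_compute_tsc_offset() used to return. A minimal
userspace sketch of that fast path (paraphrased, not the verbatim kernel
source; FRAC_BITS and the helper names are illustrative):

#include <stdint.h>

#define FRAC_BITS 48	/* illustrative; KVM's fractional width is vendor-specific */
static const uint64_t default_ratio = 1ull << FRAC_BITS;	/* 1:1 ratio */

/* Sketch of kvm_scale_tsc()'s behavior: a no-op at the default ratio. */
static uint64_t scale_tsc(uint64_t tsc, uint64_t ratio)
{
	if (ratio == default_ratio)
		return tsc;	/* fast path: no TSC scaling in use */
	return (uint64_t)(((unsigned __int128)tsc * ratio) >> FRAC_BITS);
}

/* Sketch of the new common helper: target minus the scaled host TSC. */
static uint64_t compute_tsc_offset(uint64_t host_tsc, uint64_t ratio,
				   uint64_t target_tsc)
{
	return target_tsc - scale_tsc(host_tsc, ratio);
}

At the default ratio this reduces to target_tsc - host_tsc, the old VMX
computation; with a non-default ratio it matches the old SVM computation,
which is why one helper can replace both callbacks.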
arch/x86/include/asm/kvm_host.h  +0 −1
@@ -856,7 +856,6 @@ struct kvm_x86_ops {
 	u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
-	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
 	u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
 
 	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
arch/x86/kvm/svm.c  +0 −10
@@ -1004,15 +1004,6 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho
 	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
-static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
-{
-	u64 tsc;
-
-	tsc = kvm_scale_tsc(vcpu, rdtsc());
-
-	return target_tsc - tsc;
-}
-
 static void init_vmcb(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
@@ -4370,7 +4361,6 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.read_tsc_offset = svm_read_tsc_offset,
 	.write_tsc_offset = svm_write_tsc_offset,
 	.adjust_tsc_offset = svm_adjust_tsc_offset,
-	.compute_tsc_offset = svm_compute_tsc_offset,
 	.read_l1_tsc = svm_read_l1_tsc,
 
 	.set_tdp_cr3 = set_tdp_cr3,
arch/x86/kvm/vmx.c  +0 −6
@@ -2426,11 +2426,6 @@ static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho
 					   offset + adjustment);
 }
 
-static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
-{
-	return target_tsc - rdtsc();
-}
-
 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
@@ -10813,7 +10808,6 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.read_tsc_offset = vmx_read_tsc_offset,
 	.write_tsc_offset = vmx_write_tsc_offset,
 	.adjust_tsc_offset = vmx_adjust_tsc_offset,
-	.compute_tsc_offset = vmx_compute_tsc_offset,
 	.read_l1_tsc = vmx_read_l1_tsc,
 
 	.set_tdp_cr3 = vmx_set_cr3,
arch/x86/kvm/x86.c  +12 −3
@@ -1392,6 +1392,15 @@ u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
 }
 EXPORT_SYMBOL_GPL(kvm_scale_tsc);
 
+static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+{
+	u64 tsc;
+
+	tsc = kvm_scale_tsc(vcpu, rdtsc());
+
+	return target_tsc - tsc;
+}
+
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -1403,7 +1412,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	u64 data = msr->data;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
-	offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
+	offset = kvm_compute_tsc_offset(vcpu, data);
 	ns = get_kernel_ns();
 	elapsed = ns - kvm->arch.last_tsc_nsec;
 
@@ -1460,7 +1469,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		} else {
 			u64 delta = nsec_to_cycles(vcpu, elapsed);
 			data += delta;
-			offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
+			offset = kvm_compute_tsc_offset(vcpu, data);
 			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
 		}
 		matched = true;
@@ -2687,7 +2696,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		if (tsc_delta < 0)
 			mark_tsc_unstable("KVM discovered backwards TSC");
 		if (check_tsc_unstable()) {
-			u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
+			u64 offset = kvm_compute_tsc_offset(vcpu,
 						vcpu->arch.last_guest_tsc);
 			kvm_x86_ops->write_tsc_offset(vcpu, offset);
 			vcpu->arch.tsc_catchup = 1;
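
As for why target_tsc - kvm_scale_tsc(vcpu, rdtsc()) is the right formula:
KVM models the guest-visible TSC as scale(host_tsc) + tsc_offset (cf. the
read_l1_tsc callbacks), so the offset computed by the helper makes the guest
read exactly target_tsc at the instant kvm_write_tsc() runs. A worked example
with made-up numbers (the helper names, the 2x ratio, and the 48-bit fixed
point are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 48	/* illustrative fixed-point width */

/* Illustrative stand-in for kvm_scale_tsc(), with an explicit ratio. */
static uint64_t scale_tsc(uint64_t tsc, uint64_t ratio)
{
	return (uint64_t)(((unsigned __int128)tsc * ratio) >> FRAC_BITS);
}

int main(void)
{
	uint64_t ratio      = 2ull << FRAC_BITS;	/* guest runs at 2x host rate */
	uint64_t host_tsc   = 1000000;			/* what rdtsc() would return */
	uint64_t target_tsc = 5000000;			/* TSC value the guest expects */

	/* What kvm_compute_tsc_offset() computes: 5000000 - 2000000 = 3000000. */
	uint64_t offset = target_tsc - scale_tsc(host_tsc, ratio);

	/* What hardware then presents to the guest: scale(host) + offset. */
	uint64_t guest_tsc = scale_tsc(host_tsc, ratio) + offset;

	printf("guest_tsc = %llu (target was %llu)\n",
	       (unsigned long long)guest_tsc,
	       (unsigned long long)target_tsc);	/* both print 5000000 */
	return 0;
}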