
Commit ea26e4ec authored by Paolo Bonzini

KVM: x86: drop TSC offsetting kvm_x86_ops to fix KVM_GET/SET_CLOCK



Since commit a545ab6a ("kvm: x86: add tsc_offset field to struct
kvm_vcpu_arch", 2016-09-07) the offset between host and L1 TSC is
cached and need not be fished out of the VMCS or VMCB.  This means
that we can implement adjust_tsc_offset_guest and read_l1_tsc
entirely in generic code.  The simplification is particularly
significant for VMX code, where vmx->nested.vmcs01_tsc_offset
was duplicating what is now in vcpu->arch.tsc_offset.  Therefore
the vmcs01_tsc_offset can be dropped completely.
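
Concretely, both operations reduce to one-liners over the cached field; the arch/x86/kvm/x86.c hunk at the bottom of this commit contains exactly these bodies:

	u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
	{
		/* L1's TSC is the scaled host TSC plus the cached L1 offset. */
		return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
	}

	static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
						   s64 adjustment)
	{
		/* Funnel every offset update through the caching setter. */
		kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment);
	}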

More importantly, this fixes KVM_GET_CLOCK/KVM_SET_CLOCK
which, after commit 108b249c ("KVM: x86: introduce get_kvmclock_ns",
2016-09-01) called read_l1_tsc while the VMCS was not loaded.
It thus returned bogus values on Intel CPUs.
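
The failing path, sketched from the 4.9-era sources (intermediate names reconstructed from memory and may differ slightly):

	/* KVM_GET_CLOCK ioctl, running in a task with no VMCS loaded: */
	get_kvmclock_ns(kvm)
	    -> kvm_read_l1_tsc(vcpu, rdtsc())
	        -> kvm_x86_ops->read_l1_tsc(...)    /* vmx_read_l1_tsc */
	            -> vmcs_read64(TSC_OFFSET)      /* no current VMCS: garbage */

With the offset cached in vcpu->arch.tsc_offset, the read no longer touches the VMCS at all.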

Fixes: 108b249c
Reported-by: Roman Kagan <rkagan@virtuozzo.com>
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent bd768e14
arch/x86/include/asm/kvm_host.h +0 −3
@@ -948,7 +948,6 @@ struct kvm_x86_ops {
 	int (*get_lpage_level)(void);
 	bool (*rdtscp_supported)(void);
 	bool (*invpcid_supported)(void);
-	void (*adjust_tsc_offset_guest)(struct kvm_vcpu *vcpu, s64 adjustment);
 
 	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
 
@@ -958,8 +957,6 @@ struct kvm_x86_ops {
 
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
-	u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
-
 	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
 
 	int (*check_intercept)(struct kvm_vcpu *vcpu,
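
Note what stays behind: write_tsc_offset remains a per-vendor hook, because VMX keeps the offset in the TSC_OFFSET VMCS field while SVM keeps it in vmcb->control.tsc_offset. Generic code funnels every update through the caching wrapper introduced by the prerequisite commit a545ab6a, which looks roughly like:

	static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
	{
		kvm_x86_ops->write_tsc_offset(vcpu, offset);
		vcpu->arch.tsc_offset = offset;	/* keep the cache coherent */
	}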
arch/x86/kvm/svm.c +0 −23
@@ -1138,21 +1138,6 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
-static void svm_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	svm->vmcb->control.tsc_offset += adjustment;
-	if (is_guest_mode(vcpu))
-		svm->nested.hsave->control.tsc_offset += adjustment;
-	else
-		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
-				     svm->vmcb->control.tsc_offset - adjustment,
-				     svm->vmcb->control.tsc_offset);
-
-	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
-}
-
 static void avic_init_vmcb(struct vcpu_svm *svm)
 {
 	struct vmcb *vmcb = svm->vmcb;
@@ -3449,12 +3434,6 @@ static int cr8_write_interception(struct vcpu_svm *svm)
 	return 0;
 }
 
-static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
-{
-	struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
-	return vmcb->control.tsc_offset + host_tsc;
-}
-
 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -5422,8 +5401,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.has_wbinvd_exit = svm_has_wbinvd_exit,
 
 	.write_tsc_offset = svm_write_tsc_offset,
-	.adjust_tsc_offset_guest = svm_adjust_tsc_offset_guest,
-	.read_l1_tsc = svm_read_l1_tsc,
 
 	.set_tdp_cr3 = set_tdp_cr3,
 
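
Dropping svm_adjust_tsc_offset_guest is safe for nested SVM because the retained svm_write_tsc_offset already splits any new offset between L1's saved state and the active VMCB; roughly (same-era code, reconstructed from memory, trace call elided):

	static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
	{
		struct vcpu_svm *svm = to_svm(vcpu);
		u64 g_tsc_offset = 0;

		if (is_guest_mode(vcpu)) {
			/* Preserve the L1->L2 delta while retargeting L1's offset. */
			g_tsc_offset = svm->vmcb->control.tsc_offset -
				       svm->nested.hsave->control.tsc_offset;
			svm->nested.hsave->control.tsc_offset = offset;
		}

		svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
		mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
	}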
arch/x86/kvm/vmx.c +3 −36
@@ -421,7 +421,6 @@ struct nested_vmx {
 	/* vmcs02_list cache of VMCSs recently used to run L2 guests */
 	struct list_head vmcs02_pool;
 	int vmcs02_num;
-	u64 vmcs01_tsc_offset;
 	bool change_vmcs01_virtual_x2apic_mode;
 	/* L2 must run next, and mustn't decide to exit to L1. */
 	bool nested_run_pending;
@@ -2604,20 +2603,6 @@ static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
 	return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset;
 }
 
-/*
- * Like guest_read_tsc, but always returns L1's notion of the timestamp
- * counter, even if a nested guest (L2) is currently running.
- */
-static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
-{
-	u64 tsc_offset;
-
-	tsc_offset = is_guest_mode(vcpu) ?
-		to_vmx(vcpu)->nested.vmcs01_tsc_offset :
-		vmcs_read64(TSC_OFFSET);
-	return host_tsc + tsc_offset;
-}
-
 /*
  * writes 'offset' into guest's timestamp counter offset register
  */
@@ -2631,7 +2616,6 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 		 * to the newly set TSC to get L2's TSC.
 		 */
 		struct vmcs12 *vmcs12;
-		to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset;
 		/* recalculate vmcs02.TSC_OFFSET: */
 		vmcs12 = get_vmcs12(vcpu);
 		vmcs_write64(TSC_OFFSET, offset +
@@ -2644,19 +2628,6 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	}
 }
 
-static void vmx_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
-{
-	u64 offset = vmcs_read64(TSC_OFFSET);
-
-	vmcs_write64(TSC_OFFSET, offset + adjustment);
-	if (is_guest_mode(vcpu)) {
-		/* Even when running L2, the adjustment needs to apply to L1 */
-		to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
-	} else
-		trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset,
-					   offset + adjustment);
-}
-
 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
@@ -10061,9 +10032,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
 	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
 		vmcs_write64(TSC_OFFSET,
-			vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
+			vcpu->arch.tsc_offset + vmcs12->tsc_offset);
 	else
-		vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+		vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
 	if (kvm_has_tsc_control)
 		decache_tsc_multiplier(vmx);
 
@@ -10293,8 +10264,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 
 	enter_guest_mode(vcpu);
 
-	vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
-
 	if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
 		vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
 
@@ -10818,7 +10787,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	load_vmcs12_host_state(vcpu, vmcs12);
 
 	/* Update any VMCS fields that might have changed while L2 ran */
-	vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
 	if (vmx->hv_deadline_tsc == -1)
 		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
 				PIN_BASED_VMX_PREEMPTION_TIMER);
@@ -11339,8 +11308,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
 	.write_tsc_offset = vmx_write_tsc_offset,
-	.adjust_tsc_offset_guest = vmx_adjust_tsc_offset_guest,
-	.read_l1_tsc = vmx_read_l1_tsc,
 
 	.set_tdp_cr3 = vmx_set_cr3,
 
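
The invariant the VMX hunks preserve, ignoring TSC scaling for clarity:

	/* What RDTSC returns in each guest: */
	L1 TSC = host_tsc + vcpu->arch.tsc_offset
	L2 TSC = host_tsc + vcpu->arch.tsc_offset + vmcs12->tsc_offset

	/* hence prepare_vmcs02() writes the sum into vmcs02's TSC_OFFSET,
	 * and nested_vmx_vmexit() restores plain vcpu->arch.tsc_offset. */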
arch/x86/kvm/x86.c +3 −3
@@ -1409,7 +1409,7 @@ static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 
 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 {
-	return kvm_x86_ops->read_l1_tsc(vcpu, kvm_scale_tsc(vcpu, host_tsc));
+	return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
 }
 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
 
@@ -1547,7 +1547,7 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);
 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
 					   s64 adjustment)
 {
-	kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment);
+	kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment);
 }
 
 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
@@ -1555,7 +1555,7 @@ static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
 	if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
 		WARN_ON(adjustment < 0);
 	adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
-	kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment);
+	adjust_tsc_offset_guest(vcpu, adjustment);
 }
 
 #ifdef CONFIG_X86_64
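
For completeness, the interface this fixes is exercised from userspace roughly like this (a minimal sketch; vm_fd is assumed to be a VM file descriptor from KVM_CREATE_VM):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static void show_kvmclock(int vm_fd)
	{
		struct kvm_clock_data data = { 0 };

		/* Before this fix, the returned value could be bogus on Intel. */
		if (ioctl(vm_fd, KVM_GET_CLOCK, &data) == 0)
			printf("kvmclock: %llu ns, flags %u\n",
			       (unsigned long long)data.clock, data.flags);
	}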