Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 58ea6767 authored by Haozhong Zhang, committed by Paolo Bonzini
Browse files

KVM: x86: Move TSC scaling logic out of call-back adjust_tsc_offset()



For both VMX and SVM, if the 2nd argument of the call-back
adjust_tsc_offset() is the host TSC, then adjust_tsc_offset() will scale
it first. This patch moves this common TSC scaling logic to its caller
adjust_tsc_offset_host() and renames the call-back adjust_tsc_offset() to
adjust_tsc_offset_guest().

Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 07c1419a
Loading
Loading
Loading
Loading
+1 −12
Original line number Diff line number Diff line
@@ -845,7 +845,7 @@ struct kvm_x86_ops {
	int (*get_lpage_level)(void);
	bool (*rdtscp_supported)(void);
	bool (*invpcid_supported)(void);
	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);
	void (*adjust_tsc_offset_guest)(struct kvm_vcpu *vcpu, s64 adjustment);

	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

@@ -922,17 +922,6 @@ struct kvm_arch_async_pf {

extern struct kvm_x86_ops *kvm_x86_ops;

/*
 * Apply a TSC offset adjustment that is already expressed in guest-TSC
 * units.  The 'false' third argument tells the vendor (VMX/SVM)
 * callback not to apply host-TSC scaling (see the 'host' branch in
 * svm_adjust_tsc_offset()).
 */
static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
					   s64 adjustment)
{
	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
}

/*
 * Apply a TSC offset adjustment expressed in host-TSC units.  The
 * 'true' third argument asks the vendor callback to scale the value by
 * the vCPU's TSC scaling ratio before applying it (see the 'host'
 * branch in svm_adjust_tsc_offset()).
 */
static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{
	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
}

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

+2 −8
Original line number Diff line number Diff line
@@ -983,16 +983,10 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

/* NOTE(review): diff rendering lost its +/- markers — the next two lines
 * are the pre-patch and post-patch signatures of the same function. */
static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
static void svm_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * Removed by this patch: the host-TSC scaling below now lives in
	 * the common helper adjust_tsc_offset_host() in x86.c, so the
	 * vendor callback only ever sees guest-unit adjustments.
	 */
	if (host) {
		if (vcpu->arch.tsc_scaling_ratio != TSC_RATIO_DEFAULT)
			WARN_ON(adjustment < 0);
		adjustment = kvm_scale_tsc(vcpu, (u64)adjustment);
	}

	/* Apply to the active VMCB; mirror into the nested host-save area
	 * when running a nested guest. */
	svm->vmcb->control.tsc_offset += adjustment;
	if (is_guest_mode(vcpu))
		svm->nested.hsave->control.tsc_offset += adjustment;
@@ -4360,7 +4354,7 @@ static struct kvm_x86_ops svm_x86_ops = {

	.read_tsc_offset = svm_read_tsc_offset,
	.write_tsc_offset = svm_write_tsc_offset,
	.adjust_tsc_offset = svm_adjust_tsc_offset,
	.adjust_tsc_offset_guest = svm_adjust_tsc_offset_guest,
	.read_l1_tsc = svm_read_l1_tsc,

	.set_tdp_cr3 = set_tdp_cr3,
+2 −2
Original line number Diff line number Diff line
@@ -2413,7 +2413,7 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
	}
}

static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
static void vmx_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
{
	u64 offset = vmcs_read64(TSC_OFFSET);

@@ -10807,7 +10807,7 @@ static struct kvm_x86_ops vmx_x86_ops = {

	.read_tsc_offset = vmx_read_tsc_offset,
	.write_tsc_offset = vmx_write_tsc_offset,
	.adjust_tsc_offset = vmx_adjust_tsc_offset,
	.adjust_tsc_offset_guest = vmx_adjust_tsc_offset_guest,
	.read_l1_tsc = vmx_read_l1_tsc,

	.set_tdp_cr3 = vmx_set_cr3,
+14 −0
Original line number Diff line number Diff line
@@ -1526,6 +1526,20 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)

EXPORT_SYMBOL_GPL(kvm_write_tsc);

/*
 * Apply a TSC offset adjustment already expressed in guest-TSC units:
 * forward it straight to the vendor (VMX/SVM) callback, no scaling
 * needed.
 */
static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
					   s64 adjustment)
{
	kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment);
}

/*
 * Apply a TSC offset adjustment expressed in host-TSC units: scale it
 * by the vCPU's TSC scaling ratio first, then hand the guest-unit
 * result to the vendor (VMX/SVM) callback.
 */
static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{
	s64 scaled;

	/*
	 * When a non-default scaling ratio is in use, a negative
	 * host-side adjustment is unexpected; warn if one shows up.
	 */
	if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
		WARN_ON(adjustment < 0);

	scaled = kvm_scale_tsc(vcpu, (u64) adjustment);
	kvm_x86_ops->adjust_tsc_offset_guest(vcpu, scaled);
}

#ifdef CONFIG_X86_64

static cycle_t read_tsc(void)