
Commit 199b5763 authored by Longpeng(Mike), committed by Paolo Bonzini

KVM: add spinlock optimization framework



If a vcpu exits because it is spinning on a lock in user mode, the
lock holder may have been preempted in either user mode or kernel
mode. (Note that not all architectures trap spin loops in user mode;
currently only AMD x86 and ARM/ARM64 do.)

But if a vcpu exits while in kernel mode, the holder must have been
preempted in kernel mode as well, so a vcpu that was itself preempted
in kernel mode is the more likely candidate for the lock holder.

This introduces kvm_arch_vcpu_in_kernel(), which reports whether a
vcpu was in kernel mode when it was preempted; kvm_vcpu_on_spin()'s
new boolean argument carries the same information about the spinning
vcpu.

Signed-off-by: Longpeng(Mike) <longpeng2@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 1b4d56b8
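
For context, a minimal sketch of how the new flag can drive
directed-yield candidate selection, assuming simplified helpers (the
real loop in virt/kvm/kvm_main.c additionally round-robins the start
vcpu and applies a yield-eligibility heuristic):

#include <linux/kvm_host.h>

/*
 * Simplified sketch of the directed-yield loop with the new
 * yield_to_kernel_mode flag; not the full upstream implementation.
 */
void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
{
	struct kvm *kvm = me->kvm;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu == me)
			continue;
		/* Only a preempted vcpu can be holding the lock. */
		if (!READ_ONCE(vcpu->preempted))
			continue;
		/*
		 * The spinner exited from kernel mode, so the holder was
		 * preempted in kernel mode too: skip vcpus that were
		 * preempted in user mode.
		 */
		if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
			continue;
		if (kvm_vcpu_yield_to(vcpu) > 0)
			break;
	}
}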
arch/arm/kvm/handle_exit.c: +1 −1

@@ -67,7 +67,7 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE) {
 		trace_kvm_wfx(*vcpu_pc(vcpu), true);
 		vcpu->stat.wfe_exit_stat++;
-		kvm_vcpu_on_spin(vcpu);
+		kvm_vcpu_on_spin(vcpu, false);
 	} else {
 		trace_kvm_wfx(*vcpu_pc(vcpu), false);
 		vcpu->stat.wfi_exit_stat++;
arch/arm64/kvm/handle_exit.c: +1 −1

@@ -84,7 +84,7 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
 		vcpu->stat.wfe_exit_stat++;
-		kvm_vcpu_on_spin(vcpu);
+		kvm_vcpu_on_spin(vcpu, false);
 	} else {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
 		vcpu->stat.wfi_exit_stat++;
arch/mips/kvm/mips.c: +5 −0

@@ -98,6 +98,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return !!(vcpu->arch.pending_exceptions);
 }
 
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	return 1;
arch/powerpc/kvm/powerpc.c: +5 −0

@@ -58,6 +58,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
 }
 
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	return 1;
arch/s390/kvm/diag.c: +1 −1

@@ -150,7 +150,7 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 {
 	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
 	vcpu->stat.diagnose_44++;
-	kvm_vcpu_on_spin(vcpu);
+	kvm_vcpu_on_spin(vcpu, false);
 	return 0;
 }
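
Every arch stub above returns false, so behavior is unchanged by this
commit; an architecture that can report the guest's privilege level
would replace the stub with a real check. A hedged sketch for x86,
assuming the kvm_x86_ops->get_cpl accessor (wired up in a follow-up
commit, not shown here):

/*
 * Hypothetical x86 implementation: CPL 0 means the vcpu was
 * executing in guest kernel mode when it was preempted.
 */
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->get_cpl(vcpu) == 0;
}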

