Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4c088493 authored by Raghavendra K T's avatar Raghavendra K T Committed by Avi Kivity
Browse files

KVM: Note down when cpu relax intercepted or pause loop exited



Noting when a vcpu has pause-loop exited or had cpu relax
intercepted helps in filtering the right candidate to yield to.
A wrong selection of vcpu — i.e., a vcpu that just did a pl-exit
or had cpu relax intercepted — may contribute to performance
degradation.

Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com> # on s390x
Signed-off-by: Avi Kivity <avi@redhat.com>
parent f2a74347
Loading
Loading
Loading
Loading
+34 −0
Original line number Original line Diff line number Diff line
@@ -183,6 +183,18 @@ struct kvm_vcpu {
	} async_pf;
	} async_pf;
#endif
#endif


#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * Cpu relax intercept or pause loop exit optimization
	 * in_spin_loop: set when a vcpu does a pause loop exit
	 *  or cpu relax intercepted.
	 * dy_eligible: indicates whether vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	struct kvm_vcpu_arch arch;
	struct kvm_vcpu_arch arch;
};
};


@@ -898,5 +910,27 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
	}
	}
}
}


#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

/*
 * Record whether @vcpu is currently inside a pause-loop-exit /
 * cpu-relax-intercept window (see the spin_loop.in_spin_loop field
 * comment in struct kvm_vcpu).
 */
static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
/*
 * Mark whether @vcpu is eligible for directed yield (see the
 * spin_loop.dy_eligible field comment in struct kvm_vcpu).
 */
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

/* No-op stub: spin-loop tracking is compiled out without
 * CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT. */
static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

/* No-op stub: directed-yield eligibility tracking is compiled out
 * without CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT. */
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}

#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
#endif
#endif
+5 −0
Original line number Original line Diff line number Diff line
@@ -239,6 +239,9 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
	}
	}
	vcpu->run = page_address(page);
	vcpu->run = page_address(page);


	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);

	r = kvm_arch_vcpu_init(vcpu);
	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
	if (r < 0)
		goto fail_free_run;
		goto fail_free_run;
@@ -1585,6 +1588,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
	int pass;
	int pass;
	int i;
	int i;


	kvm_vcpu_set_in_spin_loop(me, true);
	/*
	/*
	 * We boost the priority of a VCPU that is runnable but not
	 * We boost the priority of a VCPU that is runnable but not
	 * currently running, because it got preempted by something
	 * currently running, because it got preempted by something
@@ -1610,6 +1614,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
			}
			}
		}
		}
	}
	}
	kvm_vcpu_set_in_spin_loop(me, false);
}
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);