Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5d5b99cd authored by Paul Mackerras, committed by Alexander Graf
Browse files

KVM: PPC: Book3S HV: Get rid of vcore nap_count and n_woken



We can tell when a secondary thread has finished running a guest by
the fact that it clears its kvm_hstate.kvm_vcpu pointer, so there
is no real need for the nap_count field in the kvmppc_vcore struct.
This changes kvmppc_wait_for_nap to poll the kvm_hstate.kvm_vcpu
pointers of the secondary threads rather than polling vc->nap_count.
Besides reducing the size of the kvmppc_vcore struct by 8 bytes,
this also means that we can tell which secondary threads have got
stuck and thus print a more informative error message.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 25fedfca
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -272,8 +272,6 @@ struct kvmppc_vcore {
	int n_runnable;
	int num_threads;
	int entry_exit_count;
	int n_woken;
	int nap_count;
	int napping_threads;
	int first_vcpuid;
	u16 pcpu;
+0 −1
Original line number Diff line number Diff line
@@ -563,7 +563,6 @@ int main(void)
	DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort));
	DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
	DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
	DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
	DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
	DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
	DEFINE(VCORE_KVM, offsetof(struct kvmppc_vcore, kvm));
+27 −20
Original line number Diff line number Diff line
@@ -1729,8 +1729,10 @@ static int kvmppc_grab_hwthread(int cpu)
	tpaca = &paca[cpu];

	/* Ensure the thread won't go into the kernel if it wakes */
	tpaca->kvm_hstate.hwthread_req = 1;
	tpaca->kvm_hstate.kvm_vcpu = NULL;
	tpaca->kvm_hstate.napping = 0;
	smp_wmb();
	tpaca->kvm_hstate.hwthread_req = 1;

	/*
	 * If the thread is already executing in the kernel (e.g. handling
@@ -1773,35 +1775,43 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
	}
	cpu = vc->pcpu + vcpu->arch.ptid;
	tpaca = &paca[cpu];
	tpaca->kvm_hstate.kvm_vcpu = vcpu;
	tpaca->kvm_hstate.kvm_vcore = vc;
	tpaca->kvm_hstate.ptid = vcpu->arch.ptid;
	vcpu->cpu = vc->pcpu;
	/* Order stores to hstate.kvm_vcore etc. before store to kvm_vcpu */
	smp_wmb();
	tpaca->kvm_hstate.kvm_vcpu = vcpu;
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (cpu != smp_processor_id()) {
	if (cpu != smp_processor_id())
		xics_wake_cpu(cpu);
		if (vcpu->arch.ptid)
			++vc->n_woken;
	}
#endif
}

static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
static void kvmppc_wait_for_nap(void)
{
	int i;
	int cpu = smp_processor_id();
	int i, loops;

	HMT_low();
	i = 0;
	while (vc->nap_count < vc->n_woken) {
		if (++i >= 1000000) {
			pr_err("kvmppc_wait_for_nap timeout %d %d\n",
			       vc->nap_count, vc->n_woken);
	for (loops = 0; loops < 1000000; ++loops) {
		/*
		 * Check if all threads are finished.
		 * We set the vcpu pointer when starting a thread
		 * and the thread clears it when finished, so we look
		 * for any threads that still have a non-NULL vcpu ptr.
		 */
		for (i = 1; i < threads_per_subcore; ++i)
			if (paca[cpu + i].kvm_hstate.kvm_vcpu)
				break;
		if (i == threads_per_subcore) {
			HMT_medium();
			return;
		}
		cpu_relax();
		HMT_low();
	}
	HMT_medium();
	for (i = 1; i < threads_per_subcore; ++i)
		if (paca[cpu + i].kvm_hstate.kvm_vcpu)
			pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
}

/*
@@ -1942,8 +1952,6 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
	/*
	 * Initialize *vc.
	 */
	vc->n_woken = 0;
	vc->nap_count = 0;
	vc->entry_exit_count = 0;
	vc->preempt_tb = TB_NIL;
	vc->in_guest = 0;
@@ -2002,8 +2010,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		vcpu->cpu = -1;
	/* wait for secondary threads to finish writing their state to memory */
	if (vc->nap_count < vc->n_woken)
		kvmppc_wait_for_nap(vc);
	kvmppc_wait_for_nap();
	for (i = 0; i < threads_per_subcore; ++i)
		kvmppc_release_hwthread(vc->pcpu + i);
	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
+7 −12
Original line number Diff line number Diff line
@@ -292,26 +292,21 @@ kvm_secondary_got_guest:
	ld	r6, PACA_DSCR(r13)
	std	r6, HSTATE_DSCR(r13)

	/* Order load of vcore, ptid etc. after load of vcpu */
	lwsync
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu pointer so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
	 * the nap_count, because once the increment to nap_count is
	 * visible we could be given another vcpu.
	 * Once we clear HSTATE_KVM_VCPU(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b
	std	r0, HSTATE_KVM_VCPU(r13)

/*
 * At this point we have finished executing in the guest.