
Commit 13989b65 authored by Simon Guo, committed by Paul Mackerras

KVM: PPC: Book3S PR: Add math support for PR KVM HTM



The math registers will be saved into vcpu->arch.fp/vr and the
corresponding vcpu->arch.fp_tm/vr_tm areas.
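
For orientation, the fields involved look roughly like this (a trimmed
sketch of struct kvm_vcpu_arch from arch/powerpc/include/asm/kvm_host.h;
the real struct has many more members):

	struct kvm_vcpu_arch {
		/* ... */
		struct thread_fp_state fp;	/* live FP/VSX registers */
		struct thread_vr_state vr;	/* live AltiVec (VMX) registers */
		/* ... */
		struct thread_fp_state fp_tm;	/* checkpointed FP/VSX state */
		struct thread_vr_state vr_tm;	/* checkpointed AltiVec state */
		/* ... */
	};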

We flush or give up the math registers into vcpu->arch.fp/vr before
saving the transactional state. After the transactional state is
restored, the math registers will be loaded back into the hardware
registers.
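
The resulting ordering, sketched from the diff below (our annotation,
not verbatim kernel code):

	kvmppc_save_tm_pr():
		kvmppc_giveup_ext(vcpu, MSR_VSX);	/* flush FP/VEC/VSX into vcpu->arch.fp/vr */
		_kvmppc_save_tm_pr(vcpu, mfmsr());	/* then save the transactional state */

	kvmppc_restore_tm_pr():
		_kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
		kvmppc_handle_lost_math_exts(vcpu);	/* reload math regs the guest MSR enables */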

If there is a FP/VEC/VSX unavailable exception while the transaction is
active, the checkpointed math content might be incorrect, and we would
need a treclaim. / load-the-correct-checkpoint-values / trechkpt.
sequence to retry the transaction. That would complicate our solution.
To avoid this, we always keep the hardware guest MSR math bits
(shadow_msr) consistent with the MSR value the guest sees
(kvmppc_get_msr()) while the guest MSR has TM enabled. Then any
FP/VEC/VSX unavailable exception can be delivered to the guest, and the
guest handles it by itself.
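
As a standalone illustration of the mask arithmetic used by
kvmppc_handle_lost_math_exts() in the diff below (a hypothetical
userspace sketch, not kernel code; bit positions follow the kernel's
MSR_FP_LG/MSR_VSX_LG/MSR_VEC_LG definitions):

	#include <stdio.h>

	#define MSR_FP	(1UL << 13)	/* floating point available */
	#define MSR_VSX	(1UL << 23)	/* VSX available */
	#define MSR_VEC	(1UL << 25)	/* AltiVec available */

	int main(void)
	{
		unsigned long guest_msr = MSR_FP | MSR_VEC;	/* guest believes FP and VEC are on */
		unsigned long guest_owned_ext = MSR_VEC;	/* but only VEC is live in hardware */

		/* math bits the guest expects that the hardware has lost */
		unsigned long ext_diff = (guest_msr & ~guest_owned_ext) &
				(MSR_FP | MSR_VEC | MSR_VSX);

		printf("lost math bits: %#lx\n", ext_diff);	/* prints 0x2000, i.e. MSR_FP */
		return 0;
	}

A nonzero ext_diff is then fed to kvmppc_handle_ext() with the matching
facility-unavailable vector, which reloads the lost state for the guest.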

Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent 8d2e2fc5
arch/powerpc/kvm/book3s_pr.c  +35 −0
@@ -308,6 +308,28 @@ static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
	tm_disable();
}

/* Load up math bits which are enabled at kvmppc_get_msr() but not enabled in
 * hardware.
 */
static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu)
{
	ulong exit_nr;
	ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) &
		(MSR_FP | MSR_VEC | MSR_VSX);

	if (!ext_diff)
		return;

	if (ext_diff == MSR_FP)
		exit_nr = BOOK3S_INTERRUPT_FP_UNAVAIL;
	else if (ext_diff == MSR_VEC)
		exit_nr = BOOK3S_INTERRUPT_ALTIVEC;
	else
		exit_nr = BOOK3S_INTERRUPT_VSX;

	kvmppc_handle_ext(vcpu, exit_nr, ext_diff);
}

void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
{
	if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) {
@@ -315,6 +337,8 @@ void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
		return;
	}

	kvmppc_giveup_ext(vcpu, MSR_VSX);

	preempt_disable();
	_kvmppc_save_tm_pr(vcpu, mfmsr());
	preempt_enable();
@@ -324,12 +348,18 @@ void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
{
	if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
		kvmppc_restore_tm_sprs(vcpu);
		if (kvmppc_get_msr(vcpu) & MSR_TM)
			kvmppc_handle_lost_math_exts(vcpu);
		return;
	}

	preempt_disable();
	_kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
	preempt_enable();

	if (kvmppc_get_msr(vcpu) & MSR_TM)
		kvmppc_handle_lost_math_exts(vcpu);

}
#endif

@@ -468,6 +498,11 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (kvmppc_get_msr(vcpu) & MSR_TM)
		kvmppc_handle_lost_math_exts(vcpu);
#endif
}

void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)