
Commit 8d2e2fc5 authored by Simon Guo, committed by Paul Mackerras

KVM: PPC: Book3S PR: Add transaction memory save/restore skeleton



The transactional memory checkpoint area save/restore is triggered when
the vcpu's QEMU process is switched out of, or back onto, a CPU, i.e. at
kvmppc_core_vcpu_put_pr() and kvmppc_core_vcpu_load_pr().

The MSR TM active state is determined by the TS bits:
    active:   10 (transactional) or 01 (suspended)
    inactive: 00 (non-transactional)
We don't "fake" TM functionality for the guest. We "sync" the guest's
virtual MSR TM active state (10 or 01) into the shadow MSR. That is to
say, we don't emulate a transactional guest with a TM-inactive MSR.
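
For reference, the TS decoding above maps onto checks of the following
shape. This is a stand-alone illustration only: the EX_* names and the TS
bit position are assumptions made for this sketch, not the kernel's
definitions; the real check is the MSR_TM_ACTIVE() macro used in the diff
below.

/*
 * Illustration only: decode the MSR TS field as described above.
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_MSR_TS_SHIFT  33                          /* assumed TS field position */
#define EX_MSR_TS_MASK   (3ULL << EX_MSR_TS_SHIFT)   /* the two TS bits */
#define EX_MSR_TS_SUSP   (1ULL << EX_MSR_TS_SHIFT)   /* 01: suspended     -> active */
#define EX_MSR_TS_TRANS  (2ULL << EX_MSR_TS_SHIFT)   /* 10: transactional -> active */
                                                     /* 00: non-transactional -> inactive */

static inline bool ex_msr_tm_active(uint64_t msr)
{
	/* "TM active" means TS is 10 or 01, i.e. anything but 00. */
	return (msr & EX_MSR_TS_MASK) != 0;
}

static inline bool ex_msr_tm_transactional(uint64_t msr)
{
	return (msr & EX_MSR_TS_MASK) == EX_MSR_TS_TRANS;
}

static inline bool ex_msr_tm_suspended(uint64_t msr)
{
	return (msr & EX_MSR_TS_MASK) == EX_MSR_TS_SUSP;
}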

TM SPR support (TFHAR/TFIAR/TEXASR) was already added by commit
9916d57e ("KVM: PPC: Book3S PR: Expose TM registers"). Math register
support (FPR/VMX/VSX) will be added in a subsequent patch.

Whether the TM context needs to be saved/restored is determined by the
TM active state of kvmppc_get_msr() (condensed into the stand-alone
sketch below):
	* TM active   - save/restore the full TM context
	* TM inactive - only the TM SPRs need to be saved/restored
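
Read together, the two cases condense into a decision of the following
shape. This is a compilable stand-alone sketch with invented ex_* names,
not the kernel code: the real implementation is the kvmppc_save_tm_pr() /
kvmppc_restore_tm_pr() pair added in the diff below, which additionally
disables preemption around the checkpoint save/restore.

/*
 * Stand-alone sketch of the save-side decision; ex_save_tm_sprs() and
 * ex_save_tm_checkpoint() stand in for kvmppc_save_tm_sprs() and
 * _kvmppc_save_tm_pr(), and guest_msr stands in for kvmppc_get_msr().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ex_vcpu { uint64_t guest_msr; };

static bool ex_msr_tm_active(uint64_t msr)
{
	return (msr & (3ULL << 33)) != 0;   /* TS != 00, as sketched earlier */
}

static void ex_save_tm_sprs(struct ex_vcpu *v)       { (void)v; puts("save TM SPRs only"); }
static void ex_save_tm_checkpoint(struct ex_vcpu *v) { (void)v; puts("save full TM checkpoint"); }

static void ex_save_tm(struct ex_vcpu *v)
{
	if (!ex_msr_tm_active(v->guest_msr)) {
		/* Guest TM inactive: no live checkpointed state to preserve. */
		ex_save_tm_sprs(v);
		return;
	}
	/* Guest TM active (transactional or suspended): save the checkpoint too. */
	ex_save_tm_checkpoint(v);
}

int main(void)
{
	struct ex_vcpu transactional = { .guest_msr = 2ULL << 33 };  /* TS = 10 */
	struct ex_vcpu idle          = { .guest_msr = 0 };           /* TS = 00 */

	ex_save_tm(&transactional);   /* -> save full TM checkpoint  */
	ex_save_tm(&idle);            /* -> save TM SPRs only        */
	return 0;
}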

Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
Suggested-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent 66c33e79
+9 −0
@@ -257,6 +257,15 @@ extern int kvmppc_hcall_impl_pr(unsigned long cmd);
 extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
 extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
 extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
+void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
+#else
+static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
+#endif
+
 extern int kvm_irq_bypass;
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
+0 −1
@@ -627,7 +627,6 @@ struct kvm_vcpu_arch {
 
 	struct thread_vr_state vr_tm;
 	u32 vrsave_tm; /* also USPRG0 */
-
 #endif
 
 #ifdef CONFIG_KVM_EXIT_TIMING
+27 −0
@@ -43,6 +43,7 @@
 #include <linux/module.h>
 #include <linux/miscdevice.h>
 #include <asm/asm-prototypes.h>
+#include <asm/tm.h>
 
 #include "book3s.h"
 
@@ -115,6 +116,8 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
 
 	if (kvmppc_is_split_real(vcpu))
 		kvmppc_fixup_split_real(vcpu);
+
+	kvmppc_restore_tm_pr(vcpu);
 }
 
 static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
@@ -134,6 +137,7 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 
 	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
 	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
+	kvmppc_save_tm_pr(vcpu);
 
 	/* Enable AIL if supported */
 	if (cpu_has_feature(CPU_FTR_HVMODE) &&
@@ -304,6 +308,29 @@ static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
 	tm_disable();
 }
 
+void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
+{
+	if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) {
+		kvmppc_save_tm_sprs(vcpu);
+		return;
+	}
+
+	preempt_disable();
+	_kvmppc_save_tm_pr(vcpu, mfmsr());
+	preempt_enable();
+}
+
+void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
+{
+	if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
+		kvmppc_restore_tm_sprs(vcpu);
+		return;
+	}
+
+	preempt_disable();
+	_kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
+	preempt_enable();
+}
 #endif
 
 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)