Commit 769377f7 authored by Paul Mackerras

KVM: PPC: Book3S HV: Context-switch HFSCR between host and guest on POWER9



This adds code to allow us to use a different value for the HFSCR
(Hypervisor Facility Status and Control Register) when running the
guest from that which applies in the host.  The reason for doing this
is to allow us to trap the msgsndp instruction and related operations
in future so that they can be virtualized.  We also save the value of
HFSCR when a hypervisor facility unavailable interrupt occurs, because
the high byte of HFSCR indicates which facility the guest attempted to
access.
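As a rough illustration (not part of this patch), a host-side handler for
the hypervisor facility unavailable interrupt could decode the facility
from the saved value, since the interrupt cause sits in the top 8 bits of
the 64-bit HFSCR:

	/*
	 * Hedged sketch: hfscr_fac_unavail_cause() is a hypothetical helper,
	 * not a function added by this patch.
	 */
	static unsigned int hfscr_fac_unavail_cause(struct kvm_vcpu *vcpu)
	{
		/* the high byte of vcpu->arch.hfscr identifies the facility */
		return vcpu->arch.hfscr >> 56;
	}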

We save and restore the host value on guest entry/exit because some
bits of it affect host userspace execution.
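Conceptually the switch looks like the C sketch below; the real work is done
in the real-mode assembly shown further down, and run_guest() here is a
hypothetical stand-in for the guest entry path.

	static void hfscr_context_switch_sketch(struct kvm_vcpu *vcpu)
	{
		unsigned long host_hfscr = mfspr(SPRN_HFSCR);	/* save host value */

		mtspr(SPRN_HFSCR, vcpu->arch.hfscr);	/* run guest with its own HFSCR */
		run_guest(vcpu);			/* hypothetical stand-in */
		mtspr(SPRN_HFSCR, host_hfscr);		/* restore host value on exit */
	}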

We only do all this on POWER9, not on POWER8, because we are not
intending to virtualize any of the facilities controlled by HFSCR on
POWER8.  In particular, the HFSCR bit that controls execution of
msgsndp and related operations does not exist on POWER8.  The HFSCR
doesn't exist at all on POWER7.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent 1da4e2f4
+1 −0
@@ -566,6 +566,7 @@ struct kvm_vcpu_arch {
 	ulong wort;
 	ulong tid;
 	ulong psscr;
+	ulong hfscr;
 	ulong shadow_srr1;
 #endif
 	u32 vrsave; /* also USPRG0 */
+1 −0
@@ -542,6 +542,7 @@ int main(void)
 	OFFSET(VCPU_WORT, kvm_vcpu, arch.wort);
 	OFFSET(VCPU_TID, kvm_vcpu, arch.tid);
 	OFFSET(VCPU_PSSCR, kvm_vcpu, arch.psscr);
+	OFFSET(VCPU_HFSCR, kvm_vcpu, arch.hfscr);
 	OFFSET(VCORE_ENTRY_EXIT, kvmppc_vcore, entry_exit_map);
 	OFFSET(VCORE_IN_GUEST, kvmppc_vcore, in_guest);
 	OFFSET(VCORE_NAPPING_THREADS, kvmppc_vcore, napping_threads);
+10 −0
@@ -1825,6 +1825,16 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 	vcpu->arch.busy_preempt = TB_NIL;
 	vcpu->arch.intr_msr = MSR_SF | MSR_ME;
 
+	/*
+	 * Set the default HFSCR for the guest from the host value.
+	 * This value is only used on POWER9.
+	 * On POWER9 DD1, TM doesn't work, so we make sure to
+	 * prevent the guest from using it.
+	 */
+	vcpu->arch.hfscr = mfspr(SPRN_HFSCR);
+	if (!cpu_has_feature(CPU_FTR_TM))
+		vcpu->arch.hfscr &= ~HFSCR_TM;
+
 	kvmppc_mmu_book3s_hv_init(vcpu);
 
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
+16 −1
@@ -45,7 +45,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 #define NAPPING_NOVCPU	2
 
 /* Stack frame offsets for kvmppc_hv_entry */
-#define SFS			144
+#define SFS			160
 #define STACK_SLOT_TRAP		(SFS-4)
 #define STACK_SLOT_TID		(SFS-16)
 #define STACK_SLOT_PSSCR	(SFS-24)
@@ -54,6 +54,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 #define STACK_SLOT_CIABR	(SFS-48)
 #define STACK_SLOT_DAWR		(SFS-56)
 #define STACK_SLOT_DAWRX	(SFS-64)
+#define STACK_SLOT_HFSCR	(SFS-72)
 
 /*
  * Call kvmppc_hv_entry in real mode.
@@ -769,6 +770,8 @@ BEGIN_FTR_SECTION
 	std	r6, STACK_SLOT_PSSCR(r1)
 	std	r7, STACK_SLOT_PID(r1)
 	std	r8, STACK_SLOT_IAMR(r1)
+	mfspr	r5, SPRN_HFSCR
+	std	r5, STACK_SLOT_HFSCR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 BEGIN_FTR_SECTION
 	mfspr	r5, SPRN_CIABR
@@ -920,8 +923,10 @@ FTR_SECTION_ELSE
 	ld	r5, VCPU_TID(r4)
 	ld	r6, VCPU_PSSCR(r4)
 	oris	r6, r6, PSSCR_EC@h	/* This makes stop trap to HV */
+	ld	r7, VCPU_HFSCR(r4)
 	mtspr	SPRN_TIDR, r5
 	mtspr	SPRN_PSSCR, r6
+	mtspr	SPRN_HFSCR, r7
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 8:
 
@@ -1294,6 +1299,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	beq	4f
 	b	guest_exit_cont
 3:
+	/* If it's a hypervisor facility unavailable interrupt, save HFSCR */
+	cmpwi	r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
+	bne	14f
+	mfspr	r3, SPRN_HFSCR
+	std	r3, VCPU_HFSCR(r9)
+	b	guest_exit_cont
+14:
 	/* External interrupt ? */
 	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
 	bne+	guest_exit_cont
@@ -1537,6 +1549,9 @@ FTR_SECTION_ELSE
 	rldicl	r6, r6, 4, 50		/* r6 &= PSSCR_GUEST_VIS */
 	rotldi	r6, r6, 60
 	std	r6, VCPU_PSSCR(r9)
+	/* Restore host HFSCR value */
+	ld	r7, STACK_SLOT_HFSCR(r1)
+	mtspr	SPRN_HFSCR, r7
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 	/*
 	 * Restore various registers to 0, where non-zero values