
Commit c72544d8 authored by Radim Krčmář
* fix problems that could cause hangs or crashes in the host on POWER9
* fix problems that could allow guests to potentially affect or disrupt
  the execution of the controlling userspace
parents 41f1830f 3d3efb68
+51 −0
@@ -1486,6 +1486,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
 		break;
 	case KVM_REG_PPC_TB_OFFSET:
+		/*
+		 * POWER9 DD1 has an erratum where writing TBU40 causes
+		 * the timebase to lose ticks.  So we don't let the
+		 * timebase offset be changed on P9 DD1.  (It is
+		 * initialized to zero.)
+		 */
+		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+			break;
 		/* round up to multiple of 2^24 */
 		vcpu->arch.vcore->tb_offset =
 			ALIGN(set_reg_val(id, *val), 1UL << 24);
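
Side note on the rounding in the hunk above: ALIGN(set_reg_val(id, *val), 1UL << 24) rounds the requested timebase offset up to the next multiple of 2^24. A minimal user-space sketch of that arithmetic follows; round_tb_offset and TB_OFFSET_ALIGN are made-up names for illustration, not kernel code.

#include <stdio.h>
#include <stdint.h>

#define TB_OFFSET_ALIGN (1ULL << 24)

/* Same arithmetic as the kernel's ALIGN(): round up, no-op if already aligned. */
static uint64_t round_tb_offset(uint64_t requested)
{
	return (requested + TB_OFFSET_ALIGN - 1) & ~(TB_OFFSET_ALIGN - 1);
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)round_tb_offset(0x1234567));	/* -> 0x2000000 */
	printf("%#llx\n", (unsigned long long)round_tb_offset(0x2000000));	/* unchanged */
	return 0;
}
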
@@ -2907,12 +2915,36 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 	int r;
 	int srcu_idx;
+	unsigned long ebb_regs[3] = {};	/* shut up GCC */
+	unsigned long user_tar = 0;
+	unsigned int user_vrsave;
 
 	if (!vcpu->arch.sane) {
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return -EINVAL;
 	}
 
+	/*
+	 * Don't allow entry with a suspended transaction, because
+	 * the guest entry/exit code will lose it.
+	 * If the guest has TM enabled, save away their TM-related SPRs
+	 * (they will get restored by the TM unavailable interrupt).
+	 */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+	    (current->thread.regs->msr & MSR_TM)) {
+		if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+			run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+			run->fail_entry.hardware_entry_failure_reason = 0;
+			return -EINVAL;
+		}
+		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+		current->thread.regs->msr &= ~MSR_TM;
+	}
+#endif
+
 	kvmppc_core_prepare_to_enter(vcpu);
 
 	/* No need to go into the guest when all we'll do is come back out */
@@ -2934,6 +2966,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 	flush_all_to_thread(current);
 
+	/* Save userspace EBB and other register values */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		ebb_regs[0] = mfspr(SPRN_EBBHR);
+		ebb_regs[1] = mfspr(SPRN_EBBRR);
+		ebb_regs[2] = mfspr(SPRN_BESCR);
+		user_tar = mfspr(SPRN_TAR);
+	}
+	user_vrsave = mfspr(SPRN_VRSAVE);
+
 	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
 	vcpu->arch.pgdir = current->mm->pgd;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2960,6 +3001,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		}
 	} while (is_kvmppc_resume_guest(r));
 
+	/* Restore userspace EBB and other register values */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		mtspr(SPRN_EBBHR, ebb_regs[0]);
+		mtspr(SPRN_EBBRR, ebb_regs[1]);
+		mtspr(SPRN_BESCR, ebb_regs[2]);
+		mtspr(SPRN_TAR, user_tar);
+		mtspr(SPRN_FSCR, current->thread.fscr);
+	}
+	mtspr(SPRN_VRSAVE, user_vrsave);
+
  out:
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
 	atomic_dec(&vcpu->kvm->arch.vcpus_running);
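
The comment in the second hunk above explains the new entry rule: refuse to run the guest while the host task has a transaction active or suspended (it would be lost across guest entry/exit), and otherwise stash the TM SPRs and clear MSR_TM so that a later TM-unavailable interrupt restores them for the host task. Below is a self-contained sketch of that decision logic only; the function name, struct, and mask values are illustrative and do not reflect the real MSR bit layout.

#include <stdbool.h>
#include <stdint.h>

#define MSR_TM_BIT	(1ULL << 32)	/* illustrative: TM facility enabled */
#define MSR_TS_MASK	(3ULL << 33)	/* illustrative: transaction state field */

struct tm_sprs { uint64_t tfhar, tfiar, texasr; };

/* Returns false when guest entry must be refused (transaction active or
 * suspended); otherwise saves the TM SPRs and clears the TM bit so a later
 * TM-unavailable interrupt can restore them for the host task. */
static bool tm_prepare_for_guest(uint64_t *msr, struct tm_sprs *save,
				 uint64_t tfhar, uint64_t tfiar, uint64_t texasr)
{
	if (!(*msr & MSR_TM_BIT))
		return true;			/* TM not in use by this task */
	if (*msr & MSR_TS_MASK)
		return false;			/* would be lost across entry/exit */
	save->tfhar = tfhar;			/* stand-ins for mfspr(SPRN_TFHAR) etc. */
	save->tfiar = tfiar;
	save->texasr = texasr;
	*msr &= ~MSR_TM_BIT;
	return true;
}
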
+11 −1
@@ -121,10 +121,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	 * Put whatever is in the decrementer into the
 	 * hypervisor decrementer.
 	 */
+BEGIN_FTR_SECTION
+	ld	r5, HSTATE_KVM_VCORE(r13)
+	ld	r6, VCORE_KVM(r5)
+	ld	r9, KVM_HOST_LPCR(r6)
+	andis.	r9, r9, LPCR_LD@h
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	mfspr	r8,SPRN_DEC
 	mftb	r7
-	mtspr	SPRN_HDEC,r8
+BEGIN_FTR_SECTION
+	/* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
+	bne	32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	extsw	r8,r8
+32:	mtspr	SPRN_HDEC,r8
 	add	r8,r8,r7
 	std	r8,HSTATE_DECEXP(r13)
 
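
The interrupt-entry change above, and the EXTEND_HDEC uses in the next file, exist because the POWER9 large decrementer no longer fits in 32 bits: the HDEC value may only be sign-extended from bit 31 on older CPUs (or when the host's LPCR[LD] bit is clear), and comparisons must be done as signed 64-bit operations. A stand-alone C illustration of the difference follows; extend_32 is a made-up helper mirroring what extsw does, not kernel code.

#include <stdio.h>
#include <stdint.h>

/* What extsw does: take the low 32 bits and sign-extend them to 64 bits. */
static int64_t extend_32(uint64_t reg)
{
	return (int64_t)(int32_t)reg;
}

int main(void)
{
	/* A small positive remaining-time value in large-decrementer mode. */
	uint64_t hdec = 0x1ffffffffULL;

	printf("treated as 32-bit: %lld\n", (long long)extend_32(hdec));	/* -1: looks expired */
	printf("treated as 64-bit: %lld\n", (long long)(int64_t)hdec);	/* still positive */
	return 0;
}
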
+56 −19
@@ -32,12 +32,29 @@
 #include <asm/opal.h>
 #include <asm/xive-regs.h>
 
+/* Sign-extend HDEC if not on POWER9 */
+#define EXTEND_HDEC(reg)			\
+BEGIN_FTR_SECTION;				\
+	extsw	reg, reg;			\
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+
 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
 
 /* Values in HSTATE_NAPPING(r13) */
 #define NAPPING_CEDE	1
 #define NAPPING_NOVCPU	2
 
+/* Stack frame offsets for kvmppc_hv_entry */
+#define SFS			144
+#define STACK_SLOT_TRAP		(SFS-4)
+#define STACK_SLOT_TID		(SFS-16)
+#define STACK_SLOT_PSSCR	(SFS-24)
+#define STACK_SLOT_PID		(SFS-32)
+#define STACK_SLOT_IAMR		(SFS-40)
+#define STACK_SLOT_CIABR	(SFS-48)
+#define STACK_SLOT_DAWR		(SFS-56)
+#define STACK_SLOT_DAWRX	(SFS-64)
+
 /*
  * Call kvmppc_hv_entry in real mode.
  * Must be called with interrupts hard-disabled.
@@ -214,6 +231,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 kvmppc_primary_no_guest:
 	/* We handle this much like a ceded vcpu */
 	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
+	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
+	/* HDEC value came from DEC in the first place, it will fit */
 	mfspr	r3, SPRN_HDEC
 	mtspr	SPRN_DEC, r3
 	/*
@@ -295,8 +314,9 @@ kvm_novcpu_wakeup:
 
 	/* See if our timeslice has expired (HDEC is negative) */
 	mfspr	r0, SPRN_HDEC
+	EXTEND_HDEC(r0)
 	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
-	cmpwi	r0, 0
+	cmpdi	r0, 0
 	blt	kvm_novcpu_exit
 
 	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
@@ -319,10 +339,10 @@ kvm_novcpu_exit:
 	bl	kvmhv_accumulate_time
 #endif
 13:	mr	r3, r12
-	stw	r12, 112-4(r1)
+	stw	r12, STACK_SLOT_TRAP(r1)
 	bl	kvmhv_commence_exit
 	nop
-	lwz	r12, 112-4(r1)
+	lwz	r12, STACK_SLOT_TRAP(r1)
 	b	kvmhv_switch_to_host
 
 /*
@@ -390,8 +410,8 @@ kvm_secondary_got_guest:
 	lbz	r4, HSTATE_PTID(r13)
 	cmpwi	r4, 0
 	bne	63f
-	lis	r6, 0x7fff
-	ori	r6, r6, 0xffff
+	LOAD_REG_ADDR(r6, decrementer_max)
+	ld	r6, 0(r6)
 	mtspr	SPRN_HDEC, r6
 	/* and set per-LPAR registers, if doing dynamic micro-threading */
 	ld	r6, HSTATE_SPLIT_MODE(r13)
@@ -545,11 +565,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  *                                                                            *
  *****************************************************************************/
 
-/* Stack frame offsets */
-#define STACK_SLOT_TID		(112-16)
-#define STACK_SLOT_PSSCR	(112-24)
-#define STACK_SLOT_PID		(112-32)
-
 .global kvmppc_hv_entry
 kvmppc_hv_entry:
 
@@ -565,7 +580,7 @@ kvmppc_hv_entry:
 	 */
 	mflr	r0
 	std	r0, PPC_LR_STKOFF(r1)
-	stdu	r1, -112(r1)
+	stdu	r1, -SFS(r1)
 
 	/* Save R1 in the PACA */
 	std	r1, HSTATE_HOST_R1(r13)
@@ -749,10 +764,20 @@ BEGIN_FTR_SECTION
 	mfspr	r5, SPRN_TIDR
 	mfspr	r6, SPRN_PSSCR
 	mfspr	r7, SPRN_PID
+	mfspr	r8, SPRN_IAMR
 	std	r5, STACK_SLOT_TID(r1)
 	std	r6, STACK_SLOT_PSSCR(r1)
 	std	r7, STACK_SLOT_PID(r1)
+	std	r8, STACK_SLOT_IAMR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+	mfspr	r5, SPRN_CIABR
+	mfspr	r6, SPRN_DAWR
+	mfspr	r7, SPRN_DAWRX
+	std	r5, STACK_SLOT_CIABR(r1)
+	std	r6, STACK_SLOT_DAWR(r1)
+	std	r7, STACK_SLOT_DAWRX(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
 BEGIN_FTR_SECTION
 	/* Set partition DABR */
@@ -968,7 +993,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 
 	/* Check if HDEC expires soon */
 	mfspr	r3, SPRN_HDEC
-	cmpwi	r3, 512		/* 1 microsecond */
+	EXTEND_HDEC(r3)
+	cmpdi	r3, 512		/* 1 microsecond */
 	blt	hdec_soon
 
 #ifdef CONFIG_KVM_XICS
@@ -1505,11 +1531,10 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 	 * set by the guest could disrupt the host.
 	 */
 	li	r0, 0
-	mtspr	SPRN_IAMR, r0
-	mtspr	SPRN_CIABR, r0
-	mtspr	SPRN_DAWRX, r0
+	mtspr	SPRN_PSPB, r0
 	mtspr	SPRN_WORT, r0
 BEGIN_FTR_SECTION
+	mtspr	SPRN_IAMR, r0
 	mtspr	SPRN_TCSCR, r0
 	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
 	li	r0, 1
@@ -1525,6 +1550,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 	std	r6,VCPU_UAMOR(r9)
 	li	r6,0
 	mtspr	SPRN_AMR,r6
+	mtspr	SPRN_UAMOR, r6
 
 	/* Switch DSCR back to host value */
 	mfspr	r8, SPRN_DSCR
@@ -1669,13 +1695,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	ptesync
 
 	/* Restore host values of some registers */
+BEGIN_FTR_SECTION
+	ld	r5, STACK_SLOT_CIABR(r1)
+	ld	r6, STACK_SLOT_DAWR(r1)
+	ld	r7, STACK_SLOT_DAWRX(r1)
+	mtspr	SPRN_CIABR, r5
+	mtspr	SPRN_DAWR, r6
+	mtspr	SPRN_DAWRX, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 BEGIN_FTR_SECTION
 	ld	r5, STACK_SLOT_TID(r1)
 	ld	r6, STACK_SLOT_PSSCR(r1)
 	ld	r7, STACK_SLOT_PID(r1)
+	ld	r8, STACK_SLOT_IAMR(r1)
 	mtspr	SPRN_TIDR, r5
 	mtspr	SPRN_PSSCR, r6
 	mtspr	SPRN_PID, r7
+	mtspr	SPRN_IAMR, r8
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 BEGIN_FTR_SECTION
 	PPC_INVALIDATE_ERAT
@@ -1819,8 +1855,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
 	li	r0, KVM_GUEST_MODE_NONE
 	stb	r0, HSTATE_IN_GUEST(r13)
 
-	ld	r0, 112+PPC_LR_STKOFF(r1)
-	addi	r1, r1, 112
+	ld	r0, SFS+PPC_LR_STKOFF(r1)
+	addi	r1, r1, SFS
 	mtlr	r0
 	blr
 
@@ -2366,12 +2402,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
 	mfspr	r3, SPRN_DEC
 	mfspr	r4, SPRN_HDEC
 	mftb	r5
-	cmpw	r3, r4
+	extsw	r3, r3
+	EXTEND_HDEC(r4)
+	cmpd	r3, r4
 	ble	67f
 	mtspr	SPRN_DEC, r4
 67:
 	/* save expiry time of guest decrementer */
-	extsw	r3, r3
 	add	r3, r3, r5
 	ld	r4, HSTATE_KVM_VCPU(r13)
 	ld	r5, HSTATE_KVM_VCORE(r13)
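
For the final hunk, a brief sketch of the computation it performs on exit: DEC and HDEC are widened to signed 64-bit values before comparing, the hardware DEC is re-armed with the HDEC value when the guest's DEC would fire later, and the guest decrementer expiry is saved as the original DEC plus the current timebase. The names below are made up for illustration; this is not the kernel code.

#include <stdio.h>
#include <stdint.h>

/* Mirrors the cmpd/ble/mtspr SPRN_DEC and "add r3,r3,r5" sequence above. */
static uint64_t save_guest_dec(int64_t dec, int64_t hdec, uint64_t tb_now,
			       int64_t *hw_dec_out)
{
	/* If the guest DEC would expire after HDEC, arm the hardware DEC
	 * with the HDEC value so the host regains control in time. */
	*hw_dec_out = (dec > hdec) ? hdec : dec;

	/* The guest's own expiry is still based on its original DEC value. */
	return tb_now + (uint64_t)dec;
}

int main(void)
{
	int64_t hw_dec;
	uint64_t expiry = save_guest_dec(5000, 2000, 1000000, &hw_dec);

	printf("hw DEC armed with %lld, guest expiry at tb %llu\n",
	       (long long)hw_dec, (unsigned long long)expiry);
	return 0;
}
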