Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4c2de74c authored by Nicholas Piggin, committed by Michael Ellerman
Browse files

powerpc/64: Interrupts save PPR on stack rather than thread_struct



PPR is the odd register out when it comes to interrupt handling, it is
saved in current->thread.ppr while all others are saved on the stack.

The difficulty with this is that accessing thread.ppr can itself cause an
SLB fault, but the C implementation of the SLB fault handler had assumed
that the normal exception entry handlers would not cause an SLB fault.

Fix this by allocating room in the interrupt stack to save PPR.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 3eeacd9f
Loading
Loading
Loading
Loading
+4 −5
Original line number Diff line number Diff line
@@ -236,11 +236,10 @@
 * PPR save/restore macros used in exceptions_64s.S  
 * Used for P7 or later processors
 */
#define SAVE_PPR(area, ra, rb)						\
#define SAVE_PPR(area, ra)						\
BEGIN_FTR_SECTION_NESTED(940)						\
	ld	ra,PACACURRENT(r13);					\
	ld	rb,area+EX_PPR(r13);	/* Read PPR from paca */	\
	std	rb,TASKTHREADPPR(ra);					\
	ld	ra,area+EX_PPR(r13);	/* Read PPR from paca */	\
	std	ra,_PPR(r1);						\
END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)

#define RESTORE_PPR_PACA(area, ra)					\
@@ -508,7 +507,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
3:	EXCEPTION_PROLOG_COMMON_1();					   \
	beq	4f;			/* if from kernel mode		*/ \
	ACCOUNT_CPU_USER_ENTRY(r13, r9, r10);				   \
	SAVE_PPR(area, r9, r10);					   \
	SAVE_PPR(area, r9);						   \
4:	EXCEPTION_PROLOG_COMMON_2(area)					   \
	EXCEPTION_PROLOG_COMMON_3(n)					   \
	ACCOUNT_STOLEN_TIME
+2 −4
Original line number Diff line number Diff line
@@ -32,9 +32,9 @@
/* Default SMT priority is set to 3. Use bits 11-13 to save priority. */
#define PPR_PRIORITY 3
#ifdef __ASSEMBLY__
#define INIT_PPR (PPR_PRIORITY << 50)
#define DEFAULT_PPR (PPR_PRIORITY << 50)
#else
#define INIT_PPR ((u64)PPR_PRIORITY << 50)
#define DEFAULT_PPR ((u64)PPR_PRIORITY << 50)
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PPC64 */

@@ -341,7 +341,6 @@ struct thread_struct {
	 * onwards.
	 */
	int		dscr_inherit;
	unsigned long	ppr;	/* used to save/restore SMT priority */
	unsigned long	tidr;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
@@ -389,7 +388,6 @@ struct thread_struct {
	.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
	.addr_limit = KERNEL_DS, \
	.fpexc_mode = 0, \
	.ppr = INIT_PPR, \
	.fscr = FSCR_TAR | FSCR_EBB \
}
#endif
+4 −0
Original line number Diff line number Diff line
@@ -51,6 +51,10 @@ struct pt_regs
			unsigned long result;
		};
	};

#ifdef CONFIG_PPC64
	unsigned long ppr;
#endif
};
#endif

+1 −1
Original line number Diff line number Diff line
@@ -89,7 +89,6 @@ int main(void)
#ifdef CONFIG_PPC64
	DEFINE(SIGSEGV, SIGSEGV);
	DEFINE(NMI_MASK, NMI_MASK);
	OFFSET(TASKTHREADPPR, task_struct, thread.ppr);
#else
	OFFSET(THREAD_INFO, task_struct, stack);
	DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
@@ -323,6 +322,7 @@ int main(void)
	STACK_PT_REGS_OFFSET(_ESR, dsisr);
#else /* CONFIG_PPC64 */
	STACK_PT_REGS_OFFSET(SOFTE, softe);
	STACK_PT_REGS_OFFSET(_PPR, ppr);
#endif /* CONFIG_PPC64 */

#if defined(CONFIG_PPC32)
+5 −10
Original line number Diff line number Diff line
@@ -386,10 +386,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

4:	/* Anything else left to do? */
BEGIN_FTR_SECTION
	lis	r3,INIT_PPR@highest	/* Set thread.ppr = 3 */
	ld	r10,PACACURRENT(r13)
	lis	r3,DEFAULT_PPR@highest	/* Set default PPR */
	sldi	r3,r3,32	/* bits 11-13 are used for ppr */
	std	r3,TASKTHREADPPR(r10)
	std	r3,_PPR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
@@ -942,12 +941,6 @@ fast_exception_return:
	andi.	r0,r3,MSR_RI
	beq-	.Lunrecov_restore

	/* Load PPR from thread struct before we clear MSR:RI */
BEGIN_FTR_SECTION
	ld	r2,PACACURRENT(r13)
	ld	r2,TASKTHREADPPR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
@@ -968,7 +961,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	andi.	r0,r3,MSR_PR
	beq	1f
BEGIN_FTR_SECTION
	mtspr	SPRN_PPR,r2	/* Restore PPR */
	/* Restore PPR */
	ld	r2,_PPR(r1)
	mtspr	SPRN_PPR,r2
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
	REST_GPR(13, r1)
Loading