
Commit ce48b210 authored by Michael Neuling, committed by Paul Mackerras

powerpc: Add VSX context save/restore, ptrace and signal support



This patch extends the floating point save and restore code to use
VSX loads/stores when VSX is available.  This will make FP context
save/restore marginally slower on FP-only code when VSX is available,
as it has to load/store 128 bits rather than just 64 bits.

Mixing FP, VMX and VSX code will see consistent architected state.
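
For background, VSX (POWER ISA 2.06) overlays the 64 x 128-bit VSRs on the
existing register files: VSR 0-31 share doubleword 0 with FPR 0-31, and
VSR 32-63 are the VMX registers.  An illustrative C model (not the kernel's
thread_struct layout) of why a 64-bit FP save would silently drop half of a
VSR:

#include <stdint.h>
#include <stdio.h>

/* Illustrative-only model of one 128-bit VSR.  For VSR 0-31, dw[0]
 * aliases the corresponding FPR; dw[1] is the extra state that a
 * plain 64-bit stfd would not capture. */
struct vsr { uint64_t dw[2]; };

int main(void)
{
	struct vsr vsx3 = {
		.dw = { 0x3ff0000000000000ULL,	/* 1.0, visible as FPR 3 */
			0xcafef00ddeadbeefULL }	/* only a 128-bit save keeps this */
	};

	printf("FPR3 view: %016llx, full VSR3: %016llx%016llx\n",
	       (unsigned long long)vsx3.dw[0],
	       (unsigned long long)vsx3.dw[0],
	       (unsigned long long)vsx3.dw[1]);
	return 0;
}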

The signal interface is extended to give access to doubleword 1 of
VSR 0-31, following discussions with the toolchain maintainers.  Backward
compatibility is maintained.
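
Illustrative user-space sketch of what a signal handler can now reconstruct:
doubleword 0 of VSR n comes from the existing FP save area, doubleword 1 from
the newly exposed block.  How those two areas are located inside the signal
frame is ABI-specific and deliberately left abstract here; they are passed in
as plain arrays:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reassemble the full 128-bit VSR n from the two views the frame provides. */
static void vsr_from_frame(uint8_t out[16], const uint64_t *fp_save,
			   const uint64_t *vsx_dw1, int n)
{
	memcpy(out,     &fp_save[n], 8);	/* doubleword 0 (the FPR)      */
	memcpy(out + 8, &vsx_dw1[n], 8);	/* doubleword 1 (newly exposed) */
}

int main(void)
{
	uint64_t fp_save[32] = { [7] = 0x3ff0000000000000ULL };	/* 1.0 in FPR 7 */
	uint64_t vsx_dw1[32] = { [7] = 0x0123456789abcdefULL };
	uint8_t vsr7[16];
	int i;

	vsr_from_frame(vsr7, fp_save, vsx_dw1, 7);
	for (i = 0; i < 16; i++)
		printf("%02x", vsr7[i]);
	printf("\n");
	return 0;
}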

The ptrace interface is also extended to allow access to the full
VSR 0-31 registers.
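
The ptrace hunks are not shown below, so the sketch that follows is an
assumption-laden illustration only: it assumes the new request is exposed to
user space as PTRACE_GETVSRREGS (request number 0x1b assumed) and fills 32
doublewords, one doubleword 1 per VSR, with doubleword 0 still obtained
through the existing FP requests.  powerpc-only:

#include <stdint.h>
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

#ifndef PTRACE_GETVSRREGS
#define PTRACE_GETVSRREGS 0x1b		/* assumed request number */
#endif

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		/* Child: let the parent trace us, then stop. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		_exit(0);
	}

	waitpid(child, NULL, 0);		/* wait for the SIGSTOP */

	uint64_t vsx_dw1[32] = { 0 };		/* doubleword 1 of VSR 0-31 (assumed layout) */
	if (ptrace(PTRACE_GETVSRREGS, child, NULL, vsx_dw1) == -1)
		perror("PTRACE_GETVSRREGS");	/* e.g. kernel or CPU without VSX */
	else
		printf("VSR0 doubleword 1: %016llx\n",
		       (unsigned long long)vsx_dw1[0]);

	kill(child, SIGKILL);
	waitpid(child, NULL, 0);
	return 0;
}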

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 72ffff5b
+5 −0
@@ -353,6 +353,11 @@ _GLOBAL(_switch)
 	mflr	r20		/* Return to switch caller */
 	mfmsr	r22
 	li	r0, MSR_FP
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif /* CONFIG_VSX */
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
 	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
+13 −3
@@ -57,6 +57,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
 _GLOBAL(load_up_fpu)
 	mfmsr	r5
 	ori	r5,r5,MSR_FP
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+	oris	r5,r5,MSR_VSX@h
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
 	SYNC
 	MTMSRD(r5)			/* enable use of fpu now */
 	isync
@@ -73,7 +78,7 @@ _GLOBAL(load_up_fpu)
 	beq	1f
 	toreal(r4)
 	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
-	SAVE_32FPRS(0, r4)
+	SAVE_32FPVSRS(0, r5, r4)
 	mffs	fr0
 	stfd	fr0,THREAD_FPSCR(r4)
 	PPC_LL	r5,PT_REGS(r4)
@@ -100,7 +105,7 @@ _GLOBAL(load_up_fpu)
 #endif
 	lfd	fr0,THREAD_FPSCR(r5)
 	MTFSF_L(fr0)
-	REST_32FPRS(0, r5)
+	REST_32FPVSRS(0, r4, r5)
 #ifndef CONFIG_SMP
 	subi	r4,r5,THREAD
 	fromreal(r4)
@@ -119,6 +124,11 @@ _GLOBAL(load_up_fpu)
 _GLOBAL(giveup_fpu)
 	mfmsr	r5
 	ori	r5,r5,MSR_FP
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+	oris	r5,r5,MSR_VSX@h
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
 	SYNC_601
 	ISYNC_601
 	MTMSRD(r5)			/* enable use of fpu now */
@@ -129,7 +139,7 @@ _GLOBAL(giveup_fpu)
 	addi	r3,r3,THREAD	        /* want THREAD of task */
 	PPC_LL	r5,PT_REGS(r3)
 	PPC_LCMPI	0,r5,0
-	SAVE_32FPRS(0, r3)
+	SAVE_32FPVSRS(0, r4 ,r3)
 	mffs	fr0
 	stfd	fr0,THREAD_FPSCR(r3)
 	beq	1f
+65 −0
@@ -278,6 +278,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
 	. = 0xf20
 	b	altivec_unavailable_pSeries
 
+	. = 0xf40
+	b	vsx_unavailable_pSeries
+
 #ifdef CONFIG_CBE_RAS
 	HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
 #endif /* CONFIG_CBE_RAS */
@@ -297,6 +300,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
 	/* moved from 0xf00 */
 	STD_EXCEPTION_PSERIES(., performance_monitor)
 	STD_EXCEPTION_PSERIES(., altivec_unavailable)
+	STD_EXCEPTION_PSERIES(., vsx_unavailable)
 
 /*
  * An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -836,6 +840,67 @@ _STATIC(load_up_altivec)
 	blr
 #endif /* CONFIG_ALTIVEC */
 
+	.align	7
+	.globl vsx_unavailable_common
+vsx_unavailable_common:
+	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+	bne	.load_up_vsx
+1:
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
+	bl	.save_nvgprs
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	ENABLE_INTS
+	bl	.vsx_unavailable_exception
+	b	.ret_from_except
+
+#ifdef CONFIG_VSX
+/*
+ * load_up_vsx(unused, unused, tsk)
+ * Disable VSX for the task which had it previously,
+ * and save its vector registers in its thread_struct.
+ * Reuse the fp and vsx saves, but first check to see if they have
+ * been saved already.
+ * On entry: r13 == 'current' && last_task_used_vsx != 'current'
+ */
+_STATIC(load_up_vsx)
+/* Load FP and VSX registers if they haven't been done yet */
+	andi.	r5,r12,MSR_FP
+	beql+	load_up_fpu		/* skip if already loaded */
+	andis.	r5,r12,MSR_VEC@h
+	beql+	load_up_altivec		/* skip if already loaded */
+
+#ifndef CONFIG_SMP
+	ld	r3,last_task_used_vsx@got(r2)
+	ld	r4,0(r3)
+	cmpdi	0,r4,0
+	beq	1f
+	/* Disable VSX for last_task_used_vsx */
+	addi	r4,r4,THREAD
+	ld	r5,PT_REGS(r4)
+	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	lis	r6,MSR_VSX@h
+	andc	r6,r4,r6
+	std	r6,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+	ld	r4,PACACURRENT(r13)
+	addi	r4,r4,THREAD		/* Get THREAD */
+	li	r6,1
+	stw	r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
+	/* enable use of VSX after return */
+	oris	r12,r12,MSR_VSX@h
+	std	r12,_MSR(r1)
+#ifndef CONFIG_SMP
+	/* Update last_task_used_math to 'current' */
+	ld	r4,PACACURRENT(r13)
+	std	r4,0(r3)
+#endif /* CONFIG_SMP */
+	b	fast_exception_return
+#endif /* CONFIG_VSX */
+
 /*
  * Hash table stuff
  */
+33 −0
@@ -506,6 +506,39 @@ _GLOBAL(giveup_altivec)
 
 #endif /* CONFIG_ALTIVEC */
 
+#ifdef CONFIG_VSX
+/*
+ * giveup_vsx(tsk)
+ * Disable VSX for the task given as the argument,
+ * and save the vector registers in its thread_struct.
+ * Enables the VSX for use in the kernel on return.
+ */
+_GLOBAL(giveup_vsx)
+	mfmsr	r5
+	oris	r5,r5,MSR_VSX@h
+	mtmsrd	r5			/* enable use of VSX now */
+	isync
+
+	cmpdi	0,r3,0
+	beqlr-				/* if no previous owner, done */
+	addi	r3,r3,THREAD		/* want THREAD of task */
+	ld	r5,PT_REGS(r3)
+	cmpdi	0,r5,0
+	beq	1f
+	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	lis	r3,MSR_VSX@h
+	andc	r4,r4,r3		/* disable VSX for previous task */
+	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+	li	r5,0
+	ld	r4,last_task_used_vsx@got(r2)
+	std	r5,0(r4)
+#endif /* CONFIG_SMP */
+	blr
+
+#endif /* CONFIG_VSX */
+
 /* kexec_wait(phys_cpu)
  *
  * wait for the flag to change, indicating this kernel is going away but
+1 −0
@@ -120,6 +120,7 @@ struct mcontext32 {
 	elf_fpregset_t		mc_fregs;
 	unsigned int		mc_pad[2];
 	elf_vrregset_t32	mc_vregs __attribute__((__aligned__(16)));
+	elf_vsrreghalf_t32      mc_vsregs __attribute__((__aligned__(16)));
 };
 
 struct ucontext32 {