Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d0164ee2 authored by Hendrik Brueckner's avatar Hendrik Brueckner Committed by Martin Schwidefsky
Browse files

s390/kernel: remove save_fpu_regs() parameter and use __LC_CURRENT instead



All calls to save_fpu_regs() pass the fpu structure of the current task
as the parameter.  The pointer to the current task can also be
retrieved from the CPU lowcore directly.  Remove the parameter definition,
load the __LC_CURRENT task pointer from the CPU lowcore, and rebase the FPU
structure onto the task structure.  Apply the same approach for the
load_fpu_regs() function.

Reviewed-by: default avatarHeiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: default avatarHendrik Brueckner <brueckner@linux.vnet.ibm.com>
Signed-off-by: default avatarMartin Schwidefsky <schwidefsky@de.ibm.com>
parent 2a01bd1b
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -28,7 +28,7 @@ struct fpu {
	};
};

void save_fpu_regs(struct fpu *fpu);
void save_fpu_regs(void);

#define is_vx_fpu(fpu) (!!((fpu)->flags & FPU_USE_VX))
#define is_vx_task(tsk) (!!((tsk)->thread.fpu.flags & FPU_USE_VX))
+1 −1
Original line number Diff line number Diff line
@@ -30,7 +30,7 @@ static inline void restore_access_regs(unsigned int *acrs)

#define switch_to(prev,next,last) do {					\
	if (prev->mm) {							\
		save_fpu_regs(&prev->thread.fpu);			\
		save_fpu_regs();					\
		save_access_regs(&prev->thread.acrs[0]);		\
		save_ri_cb(prev->thread.ri_cb);				\
	}								\
+3 −5
Original line number Diff line number Diff line
@@ -28,16 +28,14 @@ int main(void)
	DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
	BLANK();
	DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp));
	DEFINE(__THREAD_fpu, offsetof(struct task_struct, thread.fpu));
	DEFINE(__THREAD_FPU_fpc, offsetof(struct thread_struct, fpu.fpc));
	DEFINE(__THREAD_FPU_flags, offsetof(struct thread_struct, fpu.flags));
	DEFINE(__THREAD_FPU_regs, offsetof(struct thread_struct, fpu.regs));
	DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause));
	DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address));
	DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid));
	DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb));
	BLANK();
	DEFINE(__FPU_fpc, offsetof(struct fpu, fpc));
	DEFINE(__FPU_flags, offsetof(struct fpu, flags));
	DEFINE(__FPU_regs, offsetof(struct fpu, regs));
	BLANK();
	DEFINE(__TI_task, offsetof(struct thread_info, task));
	DEFINE(__TI_flags, offsetof(struct thread_info, flags));
	DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table));
+3 −3
Original line number Diff line number Diff line
@@ -154,7 +154,7 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
static void store_sigregs(void)
{
	save_access_regs(current->thread.acrs);
	save_fpu_regs(&current->thread.fpu);
	save_fpu_regs();
}

/* Load registers after signal return */
@@ -286,7 +286,7 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
		goto badframe;
	set_current_blocked(&set);
	save_fpu_regs(&current->thread.fpu);
	save_fpu_regs();
	if (restore_sigregs32(regs, &frame->sregs))
		goto badframe;
	if (restore_sigregs_ext32(regs, &frame->sregs_ext))
@@ -309,7 +309,7 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
	set_current_blocked(&set);
	if (compat_restore_altstack(&frame->uc.uc_stack))
		goto badframe;
	save_fpu_regs(&current->thread.fpu);
	save_fpu_regs();
	if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
		goto badframe;
	if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
+18 −18
Original line number Diff line number Diff line
@@ -183,7 +183,6 @@ ENTRY(sie64a)
	xc	__SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
	tm	__LC_CPU_FLAGS+7,_CIF_FPU	# load guest fp/vx registers ?
	jno	.Lsie_load_guest_gprs
	lg	%r12,__LC_THREAD_INFO		# load fp/vx regs save area
	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
.Lsie_load_guest_gprs:
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
@@ -752,14 +751,16 @@ ENTRY(psw_idle)
 * of the register contents at system call or io return.
 */
ENTRY(save_fpu_regs)
	lg	%r2,__LC_CURRENT
	aghi	%r2,__TASK_thread
	tm	__LC_CPU_FLAGS+7,_CIF_FPU
	bor	%r14
	stfpc	__FPU_fpc(%r2)
	stfpc	__THREAD_FPU_fpc(%r2)
.Lsave_fpu_regs_fpc_end:
	lg	%r3,__FPU_regs(%r2)
	lg	%r3,__THREAD_FPU_regs(%r2)
	ltgr	%r3,%r3
	jz	.Lsave_fpu_regs_done	  # no save area -> set CIF_FPU
	tm	__FPU_flags+3(%r2),FPU_USE_VX
	tm	__THREAD_FPU_flags+3(%r2),FPU_USE_VX
	jz	.Lsave_fpu_regs_fp	  # no -> store FP regs
.Lsave_fpu_regs_vx_low:
	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
@@ -794,20 +795,19 @@ ENTRY(save_fpu_regs)
 * FP/VX state, the vector-enablement control, CR0.46, is either set or cleared.
 *
 * There are special calling conventions to fit into sysc and io return work:
 *	%r12:	__LC_THREAD_INFO
 *	%r15:	<kernel stack>
 * The function requires:
 *	%r4 and __SF_EMPTY+32(%r15)
 */
load_fpu_regs:
	lg	%r4,__LC_CURRENT
	aghi	%r4,__TASK_thread
	tm	__LC_CPU_FLAGS+7,_CIF_FPU
	bnor	%r14
	lg	%r4,__TI_task(%r12)
	la	%r4,__THREAD_fpu(%r4)
	lfpc	__FPU_fpc(%r4)
	lfpc	__THREAD_FPU_fpc(%r4)
	stctg	%c0,%c0,__SF_EMPTY+32(%r15)	# store CR0
	tm	__FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
	lg	%r4,__FPU_regs(%r4)		# %r4 <- reg save area
	tm	__THREAD_FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
	jz	.Lload_fpu_regs_fp_ctl		# -> no VX, load FP regs
.Lload_fpu_regs_vx_ctl:
	tm	__SF_EMPTY+32+5(%r15),2		# test VX control
@@ -1190,13 +1190,14 @@ cleanup_critical:
	jhe	2f
	clg	%r9,BASED(.Lcleanup_save_fpu_fpc_end)
	jhe	1f
	lg	%r2,__LC_CURRENT
0:	# Store floating-point controls
	stfpc	__FPU_fpc(%r2)
	stfpc	__THREAD_FPU_fpc(%r2)
1:	# Load register save area and check if VX is active
	lg	%r3,__FPU_regs(%r2)
	lg	%r3,__THREAD_FPU_regs(%r2)
	ltgr	%r3,%r3
	jz	5f			  # no save area -> set CIF_FPU
	tm	__FPU_flags+3(%r2),FPU_USE_VX
	tm	__THREAD_FPU_flags+3(%r2),FPU_USE_VX
	jz	4f			  # no VX -> store FP regs
2:	# Store vector registers (V0-V15)
	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
@@ -1250,11 +1251,10 @@ cleanup_critical:
	jhe	5f
	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl)
	jhe	6f
	lg	%r4,__TI_task(%r12)
	la	%r4,__THREAD_fpu(%r4)
	lfpc	__FPU_fpc(%r4)
	tm	__FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
	lg	%r4,__FPU_regs(%r4)		# %r4 <- reg save area
	lg	%r4,__LC_CURRENT
	lfpc	__THREAD_FPU_fpc(%r4)
	tm	__THREAD_FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
	jz	3f				# -> no VX, load FP regs
6:	# Set VX-enablement control
	stctg	%c0,%c0,__SF_EMPTY+32(%r15)	# store CR0
Loading