
Commit 49b8c695 authored by H. Peter Anvin

Merge branch 'x86/fpu' into x86/smap



Reason for merge:
       x86/fpu changed the structure of some of the code that x86/smap
       changes; mostly fpu-internal.h but also minor changes to the
       signal code.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>

Resolved Conflicts:
	arch/x86/ia32/ia32_signal.c
	arch/x86/include/asm/fpu-internal.h
	arch/x86/kernel/signal.c
parents e59d1b0a b1a74bf8
Documentation/kernel-parameters.txt  +6 −0
@@ -1837,6 +1837,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			and restore using xsave. The kernel will fallback to
 			enabling legacy floating-point and sse state.
 
+	eagerfpu=	[X86]
+			on	enable eager fpu restore
+			off	disable eager fpu restore
+			auto	selects the default scheme, which automatically
+				enables eagerfpu restore for xsaveopt.
+
 	nohlt		[BUGS=ARM,SH] Tells the kernel that the sleep(SH) or
 			wfi(ARM) instruction doesn't work correctly and not to
 			use it. This is also useful when using JTAG debugger.
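A note on the new eagerfpu= switch documented above: it is parsed at early boot like any other __setup() parameter. Below is a minimal illustrative sketch of such a handler, not necessarily the branch's exact code (the real parser sits with the xsave setup code on the x86/fpu branch); the enum values and the eagerfpu variable are stand-ins:

    #include <linux/init.h>
    #include <linux/string.h>

    /* Illustrative stand-ins; the x86/fpu branch keeps its own copies. */
    enum { AUTO, ENABLE, DISABLE };
    static int eagerfpu = AUTO;

    /* "eagerfpu=on|off|auto" from the kernel command line lands here. */
    static int __init eager_fpu_setup(char *s)
    {
            if (!strcmp(s, "on"))
                    eagerfpu = ENABLE;
            else if (!strcmp(s, "off"))
                    eagerfpu = DISABLE;
            else if (!strcmp(s, "auto"))
                    eagerfpu = AUTO;
            return 1;
    }
    __setup("eagerfpu=", eager_fpu_setup);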
arch/x86/ia32/ia32_signal.c  +13 −8
@@ -32,6 +32,7 @@
 #include <asm/sigframe.h>
 #include <asm/sighandling.h>
 #include <asm/sys_ia32.h>
+#include <asm/smap.h>
 
 #define FIX_EFLAGS	__FIX_EFLAGS
 
@@ -162,7 +163,8 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
 	}
 	seg = get_fs();
 	set_fs(KERNEL_DS);
-	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
+	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
+			     (stack_t __force __user *) &uoss, regs->sp);
 	set_fs(seg);
 	if (ret >= 0 && uoss_ptr)  {
 		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
@@ -254,7 +256,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 		get_user_ex(*pax, &sc->ax);
 	} get_user_catch(err);
 
-	err |= restore_i387_xstate_ia32(buf);
+	err |= restore_xstate_sig(buf, 1);
 
 	return err;
 }
@@ -362,7 +364,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
  */
 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
 				 size_t frame_size,
-				 void **fpstate)
+				 void __user **fpstate)
 {
 	unsigned long sp;
 
@@ -382,9 +384,12 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
 		sp = (unsigned long) ka->sa.sa_restorer;
 
 	if (used_math()) {
-		sp = sp - sig_xstate_ia32_size;
-		*fpstate = (struct _fpstate_ia32 *) sp;
-		if (save_i387_xstate_ia32(*fpstate) < 0)
+		unsigned long fx_aligned, math_size;
+
+		sp = alloc_mathframe(sp, 1, &fx_aligned, &math_size);
+		*fpstate = (struct _fpstate_ia32 __user *) sp;
+		if (save_xstate_sig(*fpstate, (void __user *)fx_aligned,
+				    math_size) < 0)
 			return (void __user *) -1L;
 	}
 
@@ -449,7 +454,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
 		 * These are actually not used anymore, but left because some
 		 * gdb versions depend on them as a marker.
 		 */
-		put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
+		put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
 	} put_user_catch(err);
 
 	if (err)
@@ -526,7 +531,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		 * Not actually used anymore, but left because some gdb
 		 * versions need it.
 		 */
-		put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
+		put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
 	} put_user_catch(err);
 
 	err |= copy_siginfo_to_user32(&frame->info, info);
arch/x86/ia32/sys_ia32.c  +1 −1
@@ -287,7 +287,7 @@ asmlinkage long sys32_sigaction(int sig, struct old_sigaction32 __user *act,
 	return ret;
 }
 
-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
 			      int options)
 {
 	return compat_sys_wait4(pid, stat_addr, options, NULL);
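The __user change above is a sparse annotation, not a functional one: it tags stat_addr as a userspace pointer so static analysis can flag direct dereferences and wrong-address-space casts. For reference, __user is a no-op for the compiler and only takes effect under sparse's __CHECKER__ (this is the stock definition from include/linux/compiler.h):

    #ifdef __CHECKER__
    # define __user	__attribute__((noderef, address_space(1)))
    #else
    # define __user
    #endif

The __force casts added to the do_sigaltstack() call in ia32_signal.c above serve the complementary purpose: they tell sparse that crossing address spaces there is intentional, since a kernel buffer is temporarily treated as a user pointer under set_fs(KERNEL_DS).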
arch/x86/include/asm/cpufeature.h  +3 −0
@@ -97,6 +97,7 @@
 #define X86_FEATURE_EXTD_APICID	(3*32+26) /* has extended APICID (8 bits) */
 #define X86_FEATURE_AMD_DCM     (3*32+27) /* multi-node processor */
 #define X86_FEATURE_APERFMPERF	(3*32+28) /* APERFMPERF */
+#define X86_FEATURE_EAGER_FPU	(3*32+29) /* "eagerfpu" Non lazy FPU restore */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* "pni" SSE-3 */
@@ -300,12 +301,14 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_xmm4_2		boot_cpu_has(X86_FEATURE_XMM4_2)
 #define cpu_has_x2apic		boot_cpu_has(X86_FEATURE_X2APIC)
 #define cpu_has_xsave		boot_cpu_has(X86_FEATURE_XSAVE)
+#define cpu_has_xsaveopt	boot_cpu_has(X86_FEATURE_XSAVEOPT)
 #define cpu_has_osxsave		boot_cpu_has(X86_FEATURE_OSXSAVE)
 #define cpu_has_hypervisor	boot_cpu_has(X86_FEATURE_HYPERVISOR)
 #define cpu_has_pclmulqdq	boot_cpu_has(X86_FEATURE_PCLMULQDQ)
 #define cpu_has_perfctr_core	boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
 #define cpu_has_cx8		boot_cpu_has(X86_FEATURE_CX8)
 #define cpu_has_cx16		boot_cpu_has(X86_FEATURE_CX16)
+#define cpu_has_eager_fpu	boot_cpu_has(X86_FEATURE_EAGER_FPU)
 
 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
 # define cpu_has_invlpg		1
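X86_FEATURE_EAGER_FPU lives in word 3, the Linux-defined feature word, so it is a synthetic bit: boot code sets it by policy instead of reading it from CPUID, and use_eager_fpu() then resolves through static_cpu_has(). A hedged sketch of how such a bit gets forced on; the actual decision logic, including the eagerfpu= override, lives in the x86/fpu branch's eager_fpu_init(), and this function name is hypothetical:

    /* Hypothetical example; the real policy lives in eager_fpu_init(). */
    static void __init example_eager_fpu_policy(void)
    {
            /* "auto" default: xsaveopt-capable parts get eager restore. */
            if (cpu_has_xsaveopt)
                    setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
    }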
arch/x86/include/asm/fpu-internal.h  +244 −167
@@ -12,6 +12,7 @@
 
 #include <linux/kernel_stat.h>
 #include <linux/regset.h>
+#include <linux/compat.h>
 #include <linux/slab.h>
 #include <asm/asm.h>
 #include <asm/cpufeature.h>
@@ -20,43 +21,76 @@
 #include <asm/user.h>
 #include <asm/uaccess.h>
 #include <asm/xsave.h>
+#include <asm/smap.h>
 
-extern unsigned int sig_xstate_size;
+#ifdef CONFIG_X86_64
+# include <asm/sigcontext32.h>
+# include <asm/user32.h>
+int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+			compat_sigset_t *set, struct pt_regs *regs);
+int ia32_setup_frame(int sig, struct k_sigaction *ka,
+		     compat_sigset_t *set, struct pt_regs *regs);
+#else
+# define user_i387_ia32_struct	user_i387_struct
+# define user32_fxsr_struct	user_fxsr_struct
+# define ia32_setup_frame	__setup_frame
+# define ia32_setup_rt_frame	__setup_rt_frame
+#endif
+
+extern unsigned int mxcsr_feature_mask;
 extern void fpu_init(void);
+extern void eager_fpu_init(void);
 
 DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);
 
+extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
+			      struct task_struct *tsk);
+extern void convert_to_fxsr(struct task_struct *tsk,
+			    const struct user_i387_ia32_struct *env);
+
 extern user_regset_active_fn fpregs_active, xfpregs_active;
 extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
 				xstateregs_get;
 extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
 				 xstateregs_set;
 
-
 /*
  * xstateregs_active == fpregs_active. Please refer to the comment
  * at the definition of fpregs_active.
  */
 #define xstateregs_active	fpregs_active
 
-extern struct _fpx_sw_bytes fx_sw_reserved;
-#ifdef CONFIG_IA32_EMULATION
-extern unsigned int sig_xstate_ia32_size;
-extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
-struct _fpstate_ia32;
-struct _xstate_ia32;
-extern int save_i387_xstate_ia32(void __user *buf);
-extern int restore_i387_xstate_ia32(void __user *buf);
-#endif
-
 #ifdef CONFIG_MATH_EMULATION
-# define HAVE_HWFP		(boot_cpu_data.hard_math)
 extern void finit_soft_fpu(struct i387_soft_struct *soft);
 #else
-# define HAVE_HWFP		1
 static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
 #endif
 
+static inline int is_ia32_compat_frame(void)
+{
+	return config_enabled(CONFIG_IA32_EMULATION) &&
+	       test_thread_flag(TIF_IA32);
+}
+
+static inline int is_ia32_frame(void)
+{
+	return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
+}
+
+static inline int is_x32_frame(void)
+{
+	return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
+}
+
 #define X87_FSW_ES (1 << 7)	/* Exception Summary */
 
+static __always_inline __pure bool use_eager_fpu(void)
+{
+	return static_cpu_has(X86_FEATURE_EAGER_FPU);
+}
+
 static __always_inline __pure bool use_xsaveopt(void)
 {
 	return static_cpu_has(X86_FEATURE_XSAVEOPT);
@@ -72,6 +106,13 @@ static __always_inline __pure bool use_fxsr(void)
         return static_cpu_has(X86_FEATURE_FXSR);
 }
 
+static inline void fx_finit(struct i387_fxsave_struct *fx)
+{
+	memset(fx, 0, xstate_size);
+	fx->cwd = 0x37f;
+	fx->mxcsr = MXCSR_DEFAULT;
+}
+
 extern void __sanitize_i387_state(struct task_struct *);
 
 static inline void sanitize_i387_state(struct task_struct *tsk)
@@ -81,132 +122,103 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
 	__sanitize_i387_state(tsk);
 }
 
-#ifdef CONFIG_X86_64
-static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
-{
-	int err;
-
-	/* See comment in fxsave() below. */
-#ifdef CONFIG_AS_FXSAVEQ
-	asm volatile("1:  fxrstorq %[fx]\n\t"
-		     "2:\n"
-		     ".section .fixup,\"ax\"\n"
-		     "3:  movl $-1,%[err]\n"
-		     "    jmp  2b\n"
-		     ".previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [err] "=r" (err)
-		     : [fx] "m" (*fx), "0" (0));
-#else
-	asm volatile("1:  rex64/fxrstor (%[fx])\n\t"
-		     "2:\n"
-		     ".section .fixup,\"ax\"\n"
-		     "3:  movl $-1,%[err]\n"
-		     "    jmp  2b\n"
-		     ".previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [err] "=r" (err)
-		     : [fx] "R" (fx), "m" (*fx), "0" (0));
-#endif
-	return err;
-}
+#define user_insn(insn, output, input...)				\
+({									\
+	int err;							\
+	asm volatile(ASM_STAC "\n"					\
+		     "1:" #insn "\n\t"					\
+		     "2: " ASM_CLAC "\n"				\
+		     ".section .fixup,\"ax\"\n"				\
+		     "3:  movl $-1,%[err]\n"				\
+		     "    jmp  2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : [err] "=r" (err), output				\
+		     : "0"(0), input);					\
+	err;								\
+})
+
+#define check_insn(insn, output, input...)				\
+({									\
+	int err;							\
+	asm volatile("1:" #insn "\n\t"					\
+		     "2:\n"						\
+		     ".section .fixup,\"ax\"\n"				\
+		     "3:  movl $-1,%[err]\n"				\
+		     "    jmp  2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : [err] "=r" (err), output				\
+		     : "0"(0), input);					\
+	err;								\
+})
+
+static inline int fsave_user(struct i387_fsave_struct __user *fx)
+{
+	return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
+}
 
 static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 {
-	int err;
-
-	/*
-	 * Clear the bytes not touched by the fxsave and reserved
-	 * for the SW usage.
-	 */
-	err = __clear_user(&fx->sw_reserved,
-			   sizeof(struct _fpx_sw_bytes));
-	if (unlikely(err))
-		return -EFAULT;
-
-	/* See comment in fxsave() below. */
-#ifdef CONFIG_AS_FXSAVEQ
-	asm volatile(ASM_STAC "\n"
-		     "1:  fxsaveq %[fx]\n\t"
-		     "2: " ASM_CLAC "\n"
-		     ".section .fixup,\"ax\"\n"
-		     "3:  movl $-1,%[err]\n"
-		     "    jmp  2b\n"
-		     ".previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [err] "=r" (err), [fx] "=m" (*fx)
-		     : "0" (0));
-#else
-	asm volatile(ASM_STAC "\n"
-		     "1:  rex64/fxsave (%[fx])\n\t"
-		     "2: " ASM_CLAC "\n"
-		     ".section .fixup,\"ax\"\n"
-		     "3:  movl $-1,%[err]\n"
-		     "    jmp  2b\n"
-		     ".previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [err] "=r" (err), "=m" (*fx)
-		     : [fx] "R" (fx), "0" (0));
-#endif
-	if (unlikely(err) &&
-	    __clear_user(fx, sizeof(struct i387_fxsave_struct)))
-		err = -EFAULT;
-	/* No need to clear here because the caller clears USED_MATH */
-	return err;
+	if (config_enabled(CONFIG_X86_32))
+		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
+	else if (config_enabled(CONFIG_AS_FXSAVEQ))
+		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
+
+	/* See comment in fpu_fxsave() below. */
+	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
 }
 
-static inline void fpu_fxsave(struct fpu *fpu)
-{
-	/* Using "rex64; fxsave %0" is broken because, if the memory operand
-	   uses any extended registers for addressing, a second REX prefix
-	   will be generated (to the assembler, rex64 followed by semicolon
-	   is a separate instruction), and hence the 64-bitness is lost. */
-
-#ifdef CONFIG_AS_FXSAVEQ
-	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
-	   starting with gas 2.16. */
-	__asm__ __volatile__("fxsaveq %0"
-			     : "=m" (fpu->state->fxsave));
-#else
-	/* Using, as a workaround, the properly prefixed form below isn't
-	   accepted by any binutils version so far released, complaining that
-	   the same type of prefix is used twice if an extended register is
-	   needed for addressing (fix submitted to mainline 2005-11-21).
-	asm volatile("rex64/fxsave %0"
-		     : "=m" (fpu->state->fxsave));
-	   This, however, we can work around by forcing the compiler to select
-	   an addressing mode that doesn't require extended registers. */
-	asm volatile("rex64/fxsave (%[fx])"
-		     : "=m" (fpu->state->fxsave)
-		     : [fx] "R" (&fpu->state->fxsave));
-#endif
-}
-
-#else  /* CONFIG_X86_32 */
-
-/* perform fxrstor iff the processor has extended states, otherwise frstor */
 static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 {
-	/*
-	 * The "nop" is needed to make the instructions the same
-	 * length.
-	 */
-	alternative_input(
-		"nop ; frstor %1",
-		"fxrstor %1",
-		X86_FEATURE_FXSR,
-		"m" (*fx));
-
-	return 0;
+	if (config_enabled(CONFIG_X86_32))
+		return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+	else if (config_enabled(CONFIG_AS_FXSAVEQ))
+		return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+
+	/* See comment in fpu_fxsave() below. */
+	return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
+			  "m" (*fx));
+}
+
+static inline int frstor_checking(struct i387_fsave_struct *fx)
+{
+	return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 }
 
 static inline void fpu_fxsave(struct fpu *fpu)
 {
-	asm volatile("fxsave %[fx]"
-		     : [fx] "=m" (fpu->state->fxsave));
+	if (config_enabled(CONFIG_X86_32))
+		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
+	else if (config_enabled(CONFIG_AS_FXSAVEQ))
+		asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
+	else {
+		/* Using "rex64; fxsave %0" is broken because, if the memory
+		 * operand uses any extended registers for addressing, a second
+		 * REX prefix will be generated (to the assembler, rex64
+		 * followed by semicolon is a separate instruction), and hence
+		 * the 64-bitness is lost.
+		 *
+		 * Using "fxsaveq %0" would be the ideal choice, but is only
+		 * supported starting with gas 2.16.
+		 *
+		 * Using, as a workaround, the properly prefixed form below
+		 * isn't accepted by any binutils version so far released,
+		 * complaining that the same type of prefix is used twice if
+		 * an extended register is needed for addressing (fix submitted
+		 * to mainline 2005-11-21).
+		 *
+		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
+		 *
+		 * This, however, we can work around by forcing the compiler to
+		 * select an addressing mode that doesn't require extended
+		 * registers.
+		 */
+		asm volatile( "rex64/fxsave (%[fx])"
+			     : "=m" (fpu->state->fxsave)
+			     : [fx] "R" (&fpu->state->fxsave));
+	}
 }
 
-#endif	/* CONFIG_X86_64 */
-
 /*
  * These must be called with preempt disabled. Returns
@@ -250,17 +262,14 @@ static inline int __save_init_fpu(struct task_struct *tsk)
 	return fpu_save_init(&tsk->thread.fpu);
 }
 
-static inline int fpu_fxrstor_checking(struct fpu *fpu)
-{
-	return fxrstor_checking(&fpu->state->fxsave);
-}
-
 static inline int fpu_restore_checking(struct fpu *fpu)
 {
 	if (use_xsave())
-		return fpu_xrstor_checking(fpu);
+		return fpu_xrstor_checking(&fpu->state->xsave);
+	else if (use_fxsr())
+		return fxrstor_checking(&fpu->state->fxsave);
 	else
-		return fpu_fxrstor_checking(fpu);
+		return frstor_checking(&fpu->state->fsave);
 }
 
 static inline int restore_fpu_checking(struct task_struct *tsk)
@@ -312,15 +321,52 @@ static inline void __thread_set_has_fpu(struct task_struct *tsk)
 static inline void __thread_fpu_end(struct task_struct *tsk)
 {
 	__thread_clear_has_fpu(tsk);
-	stts();
+	if (!use_eager_fpu())
+		stts();
 }
 
 static inline void __thread_fpu_begin(struct task_struct *tsk)
 {
-	clts();
+	if (!use_eager_fpu())
+		clts();
 	__thread_set_has_fpu(tsk);
 }
 
+static inline void __drop_fpu(struct task_struct *tsk)
+{
+	if (__thread_has_fpu(tsk)) {
+		/* Ignore delayed exceptions from user space */
+		asm volatile("1: fwait\n"
+			     "2:\n"
+			     _ASM_EXTABLE(1b, 2b));
+		__thread_fpu_end(tsk);
+	}
+}
+
+static inline void drop_fpu(struct task_struct *tsk)
+{
+	/*
+	 * Forget coprocessor state..
+	 */
+	preempt_disable();
+	tsk->fpu_counter = 0;
+	__drop_fpu(tsk);
+	clear_used_math();
+	preempt_enable();
+}
+
+static inline void drop_init_fpu(struct task_struct *tsk)
+{
+	if (!use_eager_fpu())
+		drop_fpu(tsk);
+	else {
+		if (use_xsave())
+			xrstor_state(init_xstate_buf, -1);
+		else
+			fxrstor_checking(&init_xstate_buf->i387);
+	}
+}
+
 /*
  * FPU state switching for scheduling.
  *
@@ -354,7 +400,12 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 {
 	fpu_switch_t fpu;
 
-	fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
+	/*
+	 * If the task has used the math, pre-load the FPU on xsave processors
+	 * or if the past 5 consecutive context-switches used math.
+	 */
+	fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
+					     new->fpu_counter > 5);
 	if (__thread_has_fpu(old)) {
 		if (!__save_init_fpu(old))
 			cpu = ~0;
@@ -366,14 +417,14 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 			new->fpu_counter++;
 			__thread_set_has_fpu(new);
 			prefetch(new->thread.fpu.state);
-		} else
+		} else if (!use_eager_fpu())
 			stts();
 	} else {
 		old->fpu_counter = 0;
 		old->thread.fpu.last_cpu = ~0;
 		if (fpu.preload) {
 			new->fpu_counter++;
-			if (fpu_lazy_restore(new, cpu))
+			if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
 				fpu.preload = 0;
 			else
 				prefetch(new->thread.fpu.state);
@@ -393,44 +444,40 @@ static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
 {
 	if (fpu.preload) {
 		if (unlikely(restore_fpu_checking(new)))
-			__thread_fpu_end(new);
+			drop_init_fpu(new);
 	}
 }
 
 /*
  * Signal frame handlers...
  */
-extern int save_i387_xstate(void __user *buf);
-extern int restore_i387_xstate(void __user *buf);
+extern int save_xstate_sig(void __user *buf, void __user *fx, int size);
+extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);
 
-static inline void __clear_fpu(struct task_struct *tsk)
+static inline int xstate_sigframe_size(void)
 {
-	if (__thread_has_fpu(tsk)) {
-		/* Ignore delayed exceptions from user space */
-		asm volatile("1: fwait\n"
-			     "2:\n"
-			     _ASM_EXTABLE(1b, 2b));
-		__thread_fpu_end(tsk);
-	}
+	return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
+}
+
+static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
+{
+	void __user *buf_fx = buf;
+	int size = xstate_sigframe_size();
+
+	if (ia32_frame && use_fxsr()) {
+		buf_fx = buf + sizeof(struct i387_fsave_struct);
+		size += sizeof(struct i387_fsave_struct);
+	}
+
+	return __restore_xstate_sig(buf, buf_fx, size);
 }
 
 /*
- * The actual user_fpu_begin/end() functions
- * need to be preemption-safe.
+ * Need to be preemption-safe.
  *
- * NOTE! user_fpu_end() must be used only after you
- * have saved the FP state, and user_fpu_begin() must
- * be used only immediately before restoring it.
- * These functions do not do any save/restore on
- * their own.
+ * NOTE! user_fpu_begin() must be used only immediately before restoring
+ * it. This function does not do any save/restore on their own.
  */
-static inline void user_fpu_end(void)
-{
-	preempt_disable();
-	__thread_fpu_end(current);
-	preempt_enable();
-}
-
 static inline void user_fpu_begin(void)
 {
 	preempt_disable();
@@ -439,22 +486,29 @@ static inline void user_fpu_begin(void)
 	preempt_enable();
 }
 
+static inline void __save_fpu(struct task_struct *tsk)
+{
+	if (use_xsave())
+		xsave_state(&tsk->thread.fpu.state->xsave, -1);
+	else
+		fpu_fxsave(&tsk->thread.fpu);
+}
+
 /*
  * These disable preemption on their own and are safe
  */
 static inline void save_init_fpu(struct task_struct *tsk)
 {
 	WARN_ON_ONCE(!__thread_has_fpu(tsk));
+
+	if (use_eager_fpu()) {
+		__save_fpu(tsk);
+		return;
+	}
+
 	preempt_disable();
 	__save_init_fpu(tsk);
 	__thread_fpu_end(tsk);
 	preempt_enable();
 }
-
-static inline void clear_fpu(struct task_struct *tsk)
-{
-	preempt_disable();
-	__clear_fpu(tsk);
-	preempt_enable();
-}
@@ -512,11 +566,34 @@ static inline void fpu_free(struct fpu *fpu)
 	}
 }
 
-static inline void fpu_copy(struct fpu *dst, struct fpu *src)
+static inline void fpu_copy(struct task_struct *dst, struct task_struct *src)
 {
-	memcpy(dst->state, src->state, xstate_size);
+	if (use_eager_fpu()) {
+		memset(&dst->thread.fpu.state->xsave, 0, xstate_size);
+		__save_fpu(dst);
+	} else {
+		struct fpu *dfpu = &dst->thread.fpu;
+		struct fpu *sfpu = &src->thread.fpu;
+
+		unlazy_fpu(src);
+		memcpy(dfpu->state, sfpu->state, xstate_size);
+	}
+}
+
+static inline unsigned long
+alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
+		unsigned long *size)
+{
+	unsigned long frame_size = xstate_sigframe_size();
+
+	*buf_fx = sp = round_down(sp - frame_size, 64);
+	if (ia32_frame && use_fxsr()) {
+		frame_size += sizeof(struct i387_fsave_struct);
+		sp -= sizeof(struct i387_fsave_struct);
+	}
+
+	*size = frame_size;
+	return sp;
 }
 
-extern void fpu_finit(struct fpu *fpu);
-
 #endif
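The core behavioral change in switch_fpu_prepare() above distills to one predicate: eager mode always reloads a math-using task's FPU state, while lazy mode keeps waiting for a streak of math-heavy context switches before paying the restore cost. An illustrative restatement, not a helper that exists in the tree:

    /* Illustrative only; this logic is inlined in switch_fpu_prepare(). */
    static inline bool example_should_preload_fpu(struct task_struct *new)
    {
            return tsk_used_math(new) &&
                   (use_eager_fpu() || new->fpu_counter > 5);
    }

Correspondingly, drop_init_fpu() replaces __thread_fpu_end() on restore failure, so that eager mode lands on a clean init state instead of leaving the task with no FPU state at all.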