
Commit 0ca5bd0d authored by Suresh Siddha, committed by H. Peter Anvin

x86, fpu: Consolidate inline asm routines for saving/restoring fpu state



Consolidate the x86 and x86_64 inline asm routines for saving/restoring the
fpu state using config_enabled().
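
The pattern, roughly (a stand-alone sketch for illustration only; the config
symbol and helper names below are invented and are not part of this patch):
instead of hiding each variant behind #ifdef, every variant stays visible to
the compiler, and the constant config_enabled() condition is folded away so
that only the selected variant is emitted.

  /* Hypothetical sketch of the config_enabled() pattern this patch applies. */
  #define CONFIG_MY_FAST_PATH 1		/* stand-in for a Kconfig symbol */
  #define my_config_enabled(cfg) (cfg)	/* crude stand-in for config_enabled() */

  static inline int save_fast(int *buf)   { return buf[0]; }     /* e.g. fxsaveq path */
  static inline int save_legacy(int *buf) { return buf[0] + 1; } /* e.g. rex64 path */

  static inline int save_state(int *buf)
  {
  	/*
  	 * Both branches are parsed and type-checked on every configuration,
  	 * but the condition is a compile-time constant, so the compiler
  	 * discards the branch that is not selected: the same object code as
  	 * #ifdef, with one copy of the routine instead of several.
  	 */
  	if (my_config_enabled(CONFIG_MY_FAST_PATH))
  		return save_fast(buf);
  	return save_legacy(buf);
  }

In the patch itself this takes the form of the check_insn() macro plus
config_enabled(CONFIG_X86_32) / config_enabled(CONFIG_AS_FXSAVEQ) checks, which
collapse the separate 32-bit, fxsaveq and rex64 copies of the save/restore
helpers into single functions.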

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Link: http://lkml.kernel.org/r/1343171129-2747-3-git-send-email-suresh.b.siddha@intel.com


Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 050902c0
arch/x86/include/asm/fpu-internal.h  +77 −107
@@ -97,34 +97,24 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
 	__sanitize_i387_state(tsk);
 }
 
-#ifdef CONFIG_X86_64
-static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
-{
-	int err;
-
-	/* See comment in fxsave() below. */
-#ifdef CONFIG_AS_FXSAVEQ
-	asm volatile("1:  fxrstorq %[fx]\n\t"
-		     "2:\n"
-		     ".section .fixup,\"ax\"\n"
-		     "3:  movl $-1,%[err]\n"
-		     "    jmp  2b\n"
-		     ".previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [err] "=r" (err)
-		     : [fx] "m" (*fx), "0" (0));
-#else
-	asm volatile("1:  rex64/fxrstor (%[fx])\n\t"
-		     "2:\n"
-		     ".section .fixup,\"ax\"\n"
-		     "3:  movl $-1,%[err]\n"
-		     "    jmp  2b\n"
-		     ".previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [err] "=r" (err)
-		     : [fx] "R" (fx), "m" (*fx), "0" (0));
-#endif
-	return err;
+#define check_insn(insn, output, input...)				\
+({									\
+	int err;							\
+	asm volatile("1:" #insn "\n\t"					\
+		     "2:\n"						\
+		     ".section .fixup,\"ax\"\n"				\
+		     "3:  movl $-1,%[err]\n"				\
+		     "    jmp  2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : [err] "=r" (err), output				\
+		     : "0"(0), input);					\
+	err;								\
+})
+
+static inline int fsave_user(struct i387_fsave_struct __user *fx)
+{
+	return check_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
 }
 
 static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
@@ -140,61 +130,66 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 	if (unlikely(err))
 		return -EFAULT;
 
-	/* See comment in fxsave() below. */
-#ifdef CONFIG_AS_FXSAVEQ
-	asm volatile("1:  fxsaveq %[fx]\n\t"
-		     "2:\n"
-		     ".section .fixup,\"ax\"\n"
-		     "3:  movl $-1,%[err]\n"
-		     "    jmp  2b\n"
-		     ".previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [err] "=r" (err), [fx] "=m" (*fx)
-		     : "0" (0));
-#else
-	asm volatile("1:  rex64/fxsave (%[fx])\n\t"
-		     "2:\n"
-		     ".section .fixup,\"ax\"\n"
-		     "3:  movl $-1,%[err]\n"
-		     "    jmp  2b\n"
-		     ".previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [err] "=r" (err), "=m" (*fx)
-		     : [fx] "R" (fx), "0" (0));
-#endif
-	if (unlikely(err) &&
-	    __clear_user(fx, sizeof(struct i387_fxsave_struct)))
-		err = -EFAULT;
-	/* No need to clear here because the caller clears USED_MATH */
-	return err;
+	if (config_enabled(CONFIG_X86_32))
+		return check_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
+	else if (config_enabled(CONFIG_AS_FXSAVEQ))
+		return check_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
+
+	/* See comment in fpu_fxsave() below. */
+	return check_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
+}
+
+static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
+{
+	if (config_enabled(CONFIG_X86_32))
+		return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+	else if (config_enabled(CONFIG_AS_FXSAVEQ))
+		return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+
+	/* See comment in fpu_fxsave() below. */
+	return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
+			  "m" (*fx));
+}
+
+static inline int frstor_checking(struct i387_fsave_struct *fx)
+{
+	return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 }
 
 static inline void fpu_fxsave(struct fpu *fpu)
 {
-	/* Using "rex64; fxsave %0" is broken because, if the memory operand
-	   uses any extended registers for addressing, a second REX prefix
-	   will be generated (to the assembler, rex64 followed by semicolon
-	   is a separate instruction), and hence the 64-bitness is lost. */
-
-#ifdef CONFIG_AS_FXSAVEQ
-	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
-	   starting with gas 2.16. */
-	__asm__ __volatile__("fxsaveq %0"
-			     : "=m" (fpu->state->fxsave));
-#else
-	/* Using, as a workaround, the properly prefixed form below isn't
-	   accepted by any binutils version so far released, complaining that
-	   the same type of prefix is used twice if an extended register is
-	   needed for addressing (fix submitted to mainline 2005-11-21).
-	asm volatile("rex64/fxsave %0"
-		     : "=m" (fpu->state->fxsave));
-	   This, however, we can work around by forcing the compiler to select
-	   an addressing mode that doesn't require extended registers. */
-	asm volatile( "rex64/fxsave (%[fx])"
-		     : "=m" (fpu->state->fxsave)
-		     : [fx] "R" (&fpu->state->fxsave));
-#endif
+	if (config_enabled(CONFIG_X86_32))
+		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
+	else if (config_enabled(CONFIG_AS_FXSAVEQ))
+		asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
+	else {
+		/* Using "rex64; fxsave %0" is broken because, if the memory
+		 * operand uses any extended registers for addressing, a second
+		 * REX prefix will be generated (to the assembler, rex64
+		 * followed by semicolon is a separate instruction), and hence
+		 * the 64-bitness is lost.
+		 *
+		 * Using "fxsaveq %0" would be the ideal choice, but is only
+		 * supported starting with gas 2.16.
+		 *
+		 * Using, as a workaround, the properly prefixed form below
+		 * isn't accepted by any binutils version so far released,
+		 * complaining that the same type of prefix is used twice if
+		 * an extended register is needed for addressing (fix submitted
+		 * to mainline 2005-11-21).
+		 *
+		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
+		 *
+		 * This, however, we can work around by forcing the compiler to
+		 * select an addressing mode that doesn't require extended
+		 * registers.
+		 */
+		asm volatile( "rex64/fxsave (%[fx])"
+			     : "=m" (fpu->state->fxsave)
+			     : [fx] "R" (&fpu->state->fxsave));
+	}
 }
+#ifdef CONFIG_X86_64
 
 int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 			compat_sigset_t *set, struct pt_regs *regs);
@@ -203,28 +198,6 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
 
 #else  /* CONFIG_X86_32 */
 
-/* perform fxrstor iff the processor has extended states, otherwise frstor */
-static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
-{
-	/*
-	 * The "nop" is needed to make the instructions the same
-	 * length.
-	 */
-	alternative_input(
-		"nop ; frstor %1",
-		"fxrstor %1",
-		X86_FEATURE_FXSR,
-		"m" (*fx));
-
-	return 0;
-}
-
-static inline void fpu_fxsave(struct fpu *fpu)
-{
-	asm volatile("fxsave %[fx]"
-		     : [fx] "=m" (fpu->state->fxsave));
-}
-
 #define ia32_setup_frame	__setup_frame
 #define ia32_setup_rt_frame	__setup_rt_frame
 
@@ -272,17 +245,14 @@ static inline int __save_init_fpu(struct task_struct *tsk)
 	return fpu_save_init(&tsk->thread.fpu);
 }
 
-static inline int fpu_fxrstor_checking(struct fpu *fpu)
-{
-	return fxrstor_checking(&fpu->state->fxsave);
-}
-
 static inline int fpu_restore_checking(struct fpu *fpu)
 {
 	if (use_xsave())
-		return fpu_xrstor_checking(fpu);
+		return fpu_xrstor_checking(&fpu->state->xsave);
+	else if (use_fxsr())
+		return fxrstor_checking(&fpu->state->fxsave);
 	else
-		return fpu_fxrstor_checking(fpu);
+		return frstor_checking(&fpu->state->fsave);
 }
 
 static inline int restore_fpu_checking(struct task_struct *tsk)
arch/x86/include/asm/xsave.h  +1 −5
@@ -42,9 +42,8 @@ extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
 			    void __user *fpstate,
 			    struct _fpx_sw_bytes *sw);
 
-static inline int fpu_xrstor_checking(struct fpu *fpu)
+static inline int fpu_xrstor_checking(struct xsave_struct *fx)
 {
-	struct xsave_struct *fx = &fpu->state->xsave;
 	int err;
 
 	asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
@@ -84,9 +83,6 @@ static inline int xsave_user(struct xsave_struct __user *buf)
 			     : [err] "=r" (err)
 			     : "D" (buf), "a" (-1), "d" (-1), "0" (0)
 			     : "memory");
-	if (unlikely(err) && __clear_user(buf, xstate_size))
-		err = -EFAULT;
-	/* No need to clear here because the caller clears USED_MATH */
 	return err;
 }
 
arch/x86/kernel/xsave.c  +3 −1
@@ -176,8 +176,10 @@ int save_i387_xstate(void __user *buf)
 		else
 			err = fxsave_user(buf);
 
-		if (err)
+		if (unlikely(err)) {
+			__clear_user(buf, xstate_size);
 			return err;
+		}
 		user_fpu_end();
 	} else {
 		sanitize_i387_state(tsk);