Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c93eceda authored by Linus Torvalds
Browse files

Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fpu updates from Ingo Molnar:
 "Initial round of kernel_fpu_begin/end cleanups from Oleg Nesterov,
  plus a cleanup from Borislav Petkov"

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, fpu: Fix math_state_restore() race with kernel_fpu_begin()
  x86, fpu: Don't abuse has_fpu in __kernel_fpu_begin/end()
  x86, fpu: Introduce per-cpu in_kernel_fpu state
  x86/fpu: Use a symbolic name for asm operand
parents 072bc448 7575637a
Loading
Loading
Loading
Loading
+6 −4
Original line number Diff line number Diff line
@@ -207,7 +207,7 @@ static inline void fpu_fxsave(struct fpu *fpu)
	if (config_enabled(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
	else {
		/* Using "rex64; fxsave %0" is broken because, if the memory
		 * operand uses any extended registers for addressing, a second
@@ -290,9 +290,11 @@ static inline int fpu_restore_checking(struct fpu *fpu)

static inline int restore_fpu_checking(struct task_struct *tsk)
{
	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
	   is pending.  Clear the x87 state here by setting it to fixed
	   values. "m" is a random variable that should be in L1 */
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
	 * pending. Clear the x87 state here by setting it to fixed values.
	 * "m" is a random variable that should be in L1.
	 */
	if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
+5 −1
Original line number Diff line number Diff line
@@ -40,8 +40,8 @@ extern void __kernel_fpu_end(void);

/*
 * Enter a kernel-FPU usage region: disable preemption, then hand off to
 * __kernel_fpu_begin() to save or steal the FPU state.
 *
 * NOTE(review): this span renders a diff hunk without +/- markers, so both
 * the old placement of the WARN_ON_ONCE(!irq_fpu_usable()) sanity check
 * (before preempt_disable()) and the new placement (after it) appear; the
 * post-merge code keeps only the one after preempt_disable().
 */
static inline void kernel_fpu_begin(void)
{
	WARN_ON_ONCE(!irq_fpu_usable());
	preempt_disable();
	WARN_ON_ONCE(!irq_fpu_usable());
	__kernel_fpu_begin();
}

@@ -51,6 +51,10 @@ static inline void kernel_fpu_end(void)
	preempt_enable();
}

/* Must be called with preempt disabled */
extern void kernel_fpu_disable(void);
extern void kernel_fpu_enable(void);

/*
 * Some instructions like VIA's padlock instructions generate a spurious
 * DNA fault but don't modify SSE registers. And these instructions
+26 −13
Original line number Diff line number Diff line
@@ -19,6 +19,19 @@
#include <asm/fpu-internal.h>
#include <asm/user.h>

/* Per-CPU flag: true while this CPU is inside a kernel-FPU region. */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Mark this CPU as being inside a kernel-FPU region so that
 * interrupted_kernel_fpu_idle() reports the FPU as unusable from
 * interrupt context until kernel_fpu_enable() is called.
 */
void kernel_fpu_disable(void)
{
	bool already_disabled = this_cpu_read(in_kernel_fpu);

	/* Nesting is not allowed: entering the region twice is a bug. */
	WARN_ON(already_disabled);
	this_cpu_write(in_kernel_fpu, true);
}

/*
 * Leave the region entered by kernel_fpu_disable(): clear this CPU's
 * in_kernel_fpu flag so interrupted_kernel_fpu_idle() can again report
 * the FPU as usable from interrupt context.
 */
void kernel_fpu_enable(void)
{
	this_cpu_write(in_kernel_fpu, false);
}

/*
 * Were we in an interrupt that interrupted kernel mode?
 *
@@ -33,6 +46,9 @@
 */
static inline bool interrupted_kernel_fpu_idle(void)
{
	if (this_cpu_read(in_kernel_fpu))
		return false;

	if (use_eager_fpu())
		return __thread_has_fpu(current);

@@ -73,10 +89,10 @@ void __kernel_fpu_begin(void)
{
	struct task_struct *me = current;

	this_cpu_write(in_kernel_fpu, true);

	if (__thread_has_fpu(me)) {
		__thread_clear_has_fpu(me);
		__save_init_fpu(me);
		/* We do 'stts()' in __kernel_fpu_end() */
	} else if (!use_eager_fpu()) {
		this_cpu_write(fpu_owner_task, NULL);
		clts();
@@ -86,19 +102,16 @@ EXPORT_SYMBOL(__kernel_fpu_begin);

/*
 * End a kernel-FPU region started by __kernel_fpu_begin().
 *
 * NOTE(review): this span renders a GitLab diff hunk without +/- markers,
 * so the pre-merge body (the use_eager_fpu()/math_state_restore() path)
 * and the post-merge body (restore the FPU state of the __thread_has_fpu()
 * owner, then clear in_kernel_fpu) appear interleaved, and the braces do
 * not balance as plain C. Consult the actual commit (c93eceda, merging
 * "x86, fpu: Don't abuse has_fpu in __kernel_fpu_begin/end()") for the
 * real merged function body.
 */
void __kernel_fpu_end(void)
{
	if (use_eager_fpu()) {
		/*
		 * For eager fpu, most the time, tsk_used_math() is true.
		 * Restore the user math as we are done with the kernel usage.
		 * At few instances during thread exit, signal handling etc,
		 * tsk_used_math() is false. Those few places will take proper
		 * actions, so we don't need to restore the math here.
		 */
		if (likely(tsk_used_math(current)))
			math_state_restore();
	} else {
	struct task_struct *me = current;

	if (__thread_has_fpu(me)) {
		if (WARN_ON(restore_fpu_checking(me)))
			drop_init_fpu(me);
	} else if (!use_eager_fpu()) {
		stts();
	}

	this_cpu_write(in_kernel_fpu, false);
}
EXPORT_SYMBOL(__kernel_fpu_end);

+5 −7
Original line number Diff line number Diff line
@@ -859,19 +859,17 @@ void math_state_restore(void)
		local_irq_disable();
	}

	/* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
	kernel_fpu_disable();
	__thread_fpu_begin(tsk);

	/*
	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
	 */
	if (unlikely(restore_fpu_checking(tsk))) {
		drop_init_fpu(tsk);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
		return;
	}

	} else {
		tsk->thread.fpu_counter++;
	}
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(math_state_restore);

dotraplinkage void