
Commit eb6a3251 authored by Ingo Molnar

x86/fpu: Remove task_disable_lazy_fpu_restore()



Replace task_disable_lazy_fpu_restore() with easier-to-read
open-coded updates: we already update the fpu->last_cpu field
explicitly in other cases.

(This also removes yet another FPU method that operates on a
task_struct.)

Better explain the fpu::last_cpu field in the structure definition.
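
For illustration, a standalone sketch (not kernel code; the struct
definitions are simplified stand-ins) of why the open-coded update is
equivalent to the removed helper: the helper stored ~0 while the call
sites now store -1, and both convert to the same all-ones value in an
unsigned int:

	#include <assert.h>

	/* Simplified stand-ins for the kernel structures: */
	struct fpu { unsigned int last_cpu; };
	struct thread_struct { struct fpu fpu; };
	struct task_struct { struct thread_struct thread; };

	/* The helper this commit removes: */
	static void task_disable_lazy_fpu_restore(struct task_struct *tsk)
	{
		tsk->thread.fpu.last_cpu = ~0;
	}

	int main(void)
	{
		struct task_struct a = { .thread.fpu.last_cpu = 3 };
		struct task_struct b = { .thread.fpu.last_cpu = 3 };

		task_disable_lazy_fpu_restore(&a);	/* old style */
		b.thread.fpu.last_cpu = -1;		/* new, open-coded style */

		/* Same stored value, so the conversion preserves behavior. */
		assert(a.thread.fpu.last_cpu == b.thread.fpu.last_cpu);
		return 0;
	}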

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent ca6787ba
+2 −12
@@ -74,16 +74,6 @@ static inline void __cpu_disable_lazy_restore(unsigned int cpu)
 	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
 }
 
-/*
- * Used to indicate that the FPU state in memory is newer than the FPU
- * state in registers, and the FPU state should be reloaded next time the
- * task is run. Only safe on the current task, or non-running tasks.
- */
-static inline void task_disable_lazy_fpu_restore(struct task_struct *tsk)
-{
-	tsk->thread.fpu.last_cpu = ~0;
-}
-
 static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
 {
 	return &new->thread.fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) &&
@@ -430,7 +420,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta

 	if (old_fpu->has_fpu) {
 		if (!fpu_save_init(&old->thread.fpu))
-			task_disable_lazy_fpu_restore(old);
+			old->thread.fpu.last_cpu = -1;
 		else
 			old->thread.fpu.last_cpu = cpu;

@@ -446,7 +436,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 			stts();
 	} else {
 		old->thread.fpu.counter = 0;
-		task_disable_lazy_fpu_restore(old);
+		old->thread.fpu.last_cpu = -1;
 		if (fpu.preload) {
 			new->thread.fpu.counter++;
 			if (fpu_lazy_restore(new, cpu))
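
The two hunks above are the switch-out paths in switch_fpu_prepare().
A minimal standalone model of the bookkeeping they implement follows;
it assumes, as the branch above reads, that fpu_save_init() returns
nonzero when the register contents are still valid after the save.
The names below (switch_out, save_kept_registers) are illustrative,
not kernel identifiers:

	#include <stdio.h>

	struct fpu { unsigned int last_cpu; };

	/* save_kept_registers models fpu_save_init()'s return value. */
	static void switch_out(struct fpu *old_fpu, unsigned int cpu,
			       int save_kept_registers)
	{
		if (save_kept_registers)
			old_fpu->last_cpu = cpu;  /* registers may be reused lazily */
		else
			old_fpu->last_cpu = -1;   /* memory copy is newer: force reload */
	}

	int main(void)
	{
		struct fpu f = { .last_cpu = 0 };

		switch_out(&f, 2, 1);
		printf("last_cpu = %u\n", f.last_cpu);	/* 2 */

		switch_out(&f, 2, 0);
		printf("last_cpu = %u\n", f.last_cpu);	/* 4294967295, i.e. -1 */
		return 0;
	}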
+11 −0
@@ -125,7 +125,18 @@ union thread_xstate {
 };
 
 struct fpu {
+	/*
+	 * Records the last CPU on which this context was loaded into
+	 * FPU registers. (In the lazy-switching case we might be
+	 * able to reuse FPU registers across multiple context switches
+	 * this way, if no intermediate task used the FPU.)
+	 *
+	 * A value of -1 is used to indicate that the FPU state in context
+	 * memory is newer than the FPU state in registers, and that the
+	 * FPU state should be reloaded next time the task is run.
+	 */
 	unsigned int			last_cpu;
+
 	unsigned int			has_fpu;
 	union thread_xstate		*state;
 	/*
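
The new comment pairs with the fpu_lazy_restore() check from the first
file: a last_cpu of -1 can never equal a real CPU number, so it defeats
lazy register reuse. A minimal standalone model of that interplay
(fpu_fpregs_owner_ctx here is a plain array standing in for the
kernel's per-CPU variable; a sketch, not kernel code):

	#include <stdio.h>

	struct fpu { unsigned int last_cpu; };

	/* Stand-in for the per-CPU fpu_fpregs_owner_ctx pointer: */
	static struct fpu *fpu_fpregs_owner_ctx[2];

	/* Mirrors fpu_lazy_restore(): registers are reusable only if this
	 * CPU still owns the context and the context was last loaded here. */
	static int fpu_lazy_restore(struct fpu *fpu, unsigned int cpu)
	{
		return fpu_fpregs_owner_ctx[cpu] == fpu && fpu->last_cpu == cpu;
	}

	int main(void)
	{
		struct fpu task_fpu = { .last_cpu = 0 };

		fpu_fpregs_owner_ctx[0] = &task_fpu;
		printf("reusable: %d\n", fpu_lazy_restore(&task_fpu, 0));	/* 1 */

		/* The open-coded invalidation from this commit: */
		task_fpu.last_cpu = -1;
		printf("reusable: %d\n", fpu_lazy_restore(&task_fpu, 0));	/* 0 */
		return 0;
	}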
+2 −3
@@ -242,8 +242,7 @@ int fpu__copy(struct task_struct *dst, struct task_struct *src)
 	dst->thread.fpu.counter = 0;
 	dst->thread.fpu.has_fpu = 0;
 	dst->thread.fpu.state = NULL;
-
-	task_disable_lazy_fpu_restore(dst);
+	dst->thread.fpu.last_cpu = -1;
 
 	if (src_fpu->fpstate_active) {
 		int err = fpstate_alloc(dst_fpu);
@@ -319,7 +318,7 @@ static int fpu__unlazy_stopped(struct task_struct *child)
 		return -EINVAL;
 
 	if (child_fpu->fpstate_active) {
-		task_disable_lazy_fpu_restore(child);
+		child->thread.fpu.last_cpu = -1;
 		return 0;
 	}