
Commit 384a23f9 authored by Ingo Molnar

x86/fpu: Use 'struct fpu' in switch_fpu_finish()



Migrate this function to pure 'struct fpu' usage.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent cb8818b6
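For readability, here is how the helper reads after this change, reconstructed from the first hunk below (a sketch for reference, not part of the commit itself): it takes the FPU state directly instead of the task.

static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
{
	/* Restore the register state only if switch_fpu_prepare() chose to preload it: */
	if (fpu_switch.preload) {
		if (unlikely(restore_fpu_checking(new_fpu)))
			fpu_reset_state(new_fpu);
	}
}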
+2 −4
@@ -451,11 +451,9 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
  * state - all we need to do is to conditionally restore the register
  * state itself.
  */
-static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
+static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
 {
-	struct fpu *new_fpu = &new->thread.fpu;
-
-	if (fpu.preload) {
+	if (fpu_switch.preload) {
 		if (unlikely(restore_fpu_checking(new_fpu)))
 			fpu_reset_state(new_fpu);
 	}
+6 −4
@@ -242,13 +242,15 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
 	struct thread_struct *prev = &prev_p->thread,
 			     *next = &next_p->thread;
+	struct fpu *prev_fpu = &prev->fpu;
+	struct fpu *next_fpu = &next->fpu;
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
-	fpu_switch_t fpu;
+	fpu_switch_t fpu_switch;
 
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
-	fpu = switch_fpu_prepare(&prev_p->thread.fpu, &next_p->thread.fpu, cpu);
+	fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
 
 	/*
 	 * Save away %gs. No need to save %fs, as it was saved on the
@@ -318,7 +320,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	if (prev->gs | next->gs)
 		lazy_load_gs(next->gs);
 
-	switch_fpu_finish(next_p, fpu);
+	switch_fpu_finish(next_fpu, fpu_switch);
 
 	this_cpu_write(current_task, next_p);
 
+5 −3
@@ -273,12 +273,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
 	struct thread_struct *prev = &prev_p->thread;
 	struct thread_struct *next = &next_p->thread;
+	struct fpu *prev_fpu = &prev->fpu;
+	struct fpu *next_fpu = &next->fpu;
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
 	unsigned fsindex, gsindex;
-	fpu_switch_t fpu;
+	fpu_switch_t fpu_switch;
 
-	fpu = switch_fpu_prepare(&prev_p->thread.fpu, &next_p->thread.fpu, cpu);
+	fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
 
 	/* We must save %fs and %gs before load_TLS() because
 	 * %fs and %gs may be cleared by load_TLS().
@@ -390,7 +392,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
 	prev->gsindex = gsindex;
 
-	switch_fpu_finish(next_p, fpu);
+	switch_fpu_finish(next_fpu, fpu_switch);
 
 	/*
 	 * Switch the PDA and FPU contexts.
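
Taken together, the caller side in both __switch_to() variants now pairs the two helpers around the rest of the context switch. A condensed sketch based on the hunks above (surrounding code elided, the local pointers written out):

	struct fpu *prev_fpu = &prev_p->thread.fpu;
	struct fpu *next_fpu = &next_p->thread.fpu;
	fpu_switch_t fpu_switch;

	/* Hand off prev's FPU state and decide whether next's state gets preloaded: */
	fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);

	/* ... segment registers, TLS, stack pointer etc. are switched here ... */

	/* Conditionally restore next's FPU register state: */
	switch_fpu_finish(next_fpu, fpu_switch);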