
Commit 304bceda authored by Suresh Siddha, committed by H. Peter Anvin

x86, fpu: use non-lazy fpu restore for processors supporting xsave



The fundamental model of the current Linux kernel is to lazily init and
restore the FPU instead of restoring the task's FPU state during a context
switch. This patch changes that fundamental lazy model to a non-lazy model
for processors supporting the xsave feature.
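
To illustrate the difference, here is a minimal sketch of the two models.
All names (task_t, fpu_save, fpu_restore, dna_trap_handler) are hypothetical
stand-ins for this sketch, not the kernel's actual context-switch code:

/* Hypothetical stand-ins; illustration only. */
typedef struct { unsigned char xsave_area[832]; } task_t;

static void stts(void) { /* set cr0.TS (privileged; stubbed here) */ }
static void clts(void) { /* clear cr0.TS (privileged; stubbed here) */ }
static void fpu_save(task_t *t)    { (void)t; /* xsave/xsaveopt in reality */ }
static void fpu_restore(task_t *t) { (void)t; /* xrstor in reality */ }

/* Lazy model: leave FPU instructions disabled, restore on first use. */
static void lazy_switch_to(task_t *next)
{
	(void)next;
	stts();			/* next FPU instruction raises a #DNA trap */
}

static void dna_trap_handler(task_t *curr)
{
	clts();			/* re-enable FPU instructions */
	fpu_restore(curr);	/* pay the restore cost only on actual use */
}

/* Non-lazy model: save and restore eagerly on every context switch. */
static void eager_switch_to(task_t *prev, task_t *next)
{
	fpu_save(prev);
	fpu_restore(next);	/* no #DNA trap, no serializing cr0.TS write */
}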

Reasons driving this model change:

i. Newer processors support optimized state save/restore using xsaveopt and
xrstor by tracking the INIT and MODIFIED state during context switches (see
the sketch after this list). This is faster than toggling the cr0.TS bit,
which has serializing semantics.

ii. Newer glibc versions use SSE for some of the optimized copy/clear
routines. With certain workloads (like boot, kernel compilation, etc.), the
application completes its work within the first 5 task switches, thus taking
up to 5 #DNA traps without the kernel getting a chance to apply the
above-mentioned pre-load heuristic.

iii. Some xstate features (like AMD's LWP feature) don't honor the cr0.TS bit
and thus will not work correctly in the presence of lazy restore. Non-lazy
state restore is needed for enabling such features.
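
To make reason (i) concrete, here is a hedged sketch of the xsave/xrstor
primitives, simplified from the kernel's real wrappers (which select
xsaveopt at runtime via the alternatives mechanism). The edx:eax pair
selects the state components to save or restore; xsaveopt additionally
skips components that are in their INIT state or unmodified since the
last xrstor:

#include <stdint.h>

/* Simplified sketch; 'buf' must point to a 64-byte-aligned xsave area. */
static inline void xsave_buf(void *buf, uint64_t mask)
{
	uint32_t lmask = (uint32_t)mask;
	uint32_t hmask = (uint32_t)(mask >> 32);

	asm volatile("xsave (%0)"
		     : : "r" (buf), "a" (lmask), "d" (hmask) : "memory");
}

static inline void xrstor_buf(void *buf, uint64_t mask)
{
	uint32_t lmask = (uint32_t)mask;
	uint32_t hmask = (uint32_t)(mask >> 32);

	asm volatile("xrstor (%0)"
		     : : "r" (buf), "a" (lmask), "d" (hmask) : "memory");
}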

Some data from a two-socket SNB (Sandy Bridge) system:
 * Saved 20K DNA exceptions during boot.
 * Saved 50K DNA exceptions during a kernel-compilation workload.
 * Improved throughput of the AVX-based checksumming function inside the
   kernel by ~15%, as xsave/xrstor is faster than the serializing clts/stts
   pair.

Also, kernel_fpu_begin()/kernel_fpu_end() now rely on the patched
alternative instructions. So move check_fpu(), which uses
kernel_fpu_begin()/kernel_fpu_end(), to after alternative_instructions().
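
Sketched, the ordering constraint looks like this (hypothetical stubs; the
real mechanism is the kernel's in-place alternatives patching, e.g. the
static_cpu_has()-style check behind use_xsave()):

/* Hedged sketch of the ordering constraint, not the kernel's code. */
static int feature_patched;	/* stand-in for an in-place patched jump */

static int use_xsave(void) { return feature_patched; }

static void alternative_instructions(void) { feature_patched = 1; }

static void check_fpu(void)
{
	/* kernel_fpu_begin()/kernel_fpu_end() branch on use_xsave(), so
	 * this is correct only after alternative_instructions() ran. */
}

static void check_bugs(void)
{
	alternative_instructions();	/* patch feature checks first */
	check_fpu();			/* ...then exercise the FPU paths */
}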

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Link: http://lkml.kernel.org/r/1345842782-24175-7-git-send-email-suresh.b.siddha@intel.com
Merge 32-bit boot fix from:
Link: http://lkml.kernel.org/r/1347300665-6209-4-git-send-email-suresh.b.siddha@intel.com


Cc: Jim Kukunas <james.t.kukunas@linux.intel.com>
Cc: NeilBrown <neilb@suse.de>
Cc: Avi Kivity <avi@redhat.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 9c6ff8bb
arch/x86/include/asm/fpu-internal.h  +63 −33
@@ -291,15 +291,48 @@ static inline void __thread_set_has_fpu(struct task_struct *tsk)
 static inline void __thread_fpu_end(struct task_struct *tsk)
 {
 	__thread_clear_has_fpu(tsk);
-	stts();
+	if (!use_xsave())
+		stts();
 }
 
 static inline void __thread_fpu_begin(struct task_struct *tsk)
 {
-	clts();
+	if (!use_xsave())
+		clts();
 	__thread_set_has_fpu(tsk);
 }
 
+static inline void __drop_fpu(struct task_struct *tsk)
+{
+	if (__thread_has_fpu(tsk)) {
+		/* Ignore delayed exceptions from user space */
+		asm volatile("1: fwait\n"
+			     "2:\n"
+			     _ASM_EXTABLE(1b, 2b));
+		__thread_fpu_end(tsk);
+	}
+}
+
+static inline void drop_fpu(struct task_struct *tsk)
+{
+	/*
+	 * Forget coprocessor state..
+	 */
+	preempt_disable();
+	tsk->fpu_counter = 0;
+	__drop_fpu(tsk);
+	clear_used_math();
+	preempt_enable();
+}
+
+static inline void drop_init_fpu(struct task_struct *tsk)
+{
+	if (!use_xsave())
+		drop_fpu(tsk);
+	else
+		xrstor_state(init_xstate_buf, -1);
+}
+
 /*
  * FPU state switching for scheduling.
  *
@@ -333,7 +366,12 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 {
 	fpu_switch_t fpu;
 
-	fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
+	/*
+	 * If the task has used the math, pre-load the FPU on xsave processors
+	 * or if the past 5 consecutive context-switches used math.
+	 */
+	fpu.preload = tsk_used_math(new) && (use_xsave() ||
+					     new->fpu_counter > 5);
 	if (__thread_has_fpu(old)) {
 		if (!__save_init_fpu(old))
 			cpu = ~0;
@@ -345,14 +383,14 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 			new->fpu_counter++;
 			__thread_set_has_fpu(new);
 			prefetch(new->thread.fpu.state);
-		} else
+		} else if (!use_xsave())
 			stts();
 	} else {
 		old->fpu_counter = 0;
 		old->thread.fpu.last_cpu = ~0;
 		if (fpu.preload) {
 			new->fpu_counter++;
-			if (fpu_lazy_restore(new, cpu))
+			if (!use_xsave() && fpu_lazy_restore(new, cpu))
 				fpu.preload = 0;
 			else
 				prefetch(new->thread.fpu.state);
@@ -372,7 +410,7 @@ static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
 {
 	if (fpu.preload) {
 		if (unlikely(restore_fpu_checking(new)))
-			__thread_fpu_end(new);
+			drop_init_fpu(new);
 	}
 }

@@ -400,17 +438,6 @@ static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
 	return __restore_xstate_sig(buf, buf_fx, size);
 }
 
-static inline void __drop_fpu(struct task_struct *tsk)
-{
-	if (__thread_has_fpu(tsk)) {
-		/* Ignore delayed exceptions from user space */
-		asm volatile("1: fwait\n"
-			     "2:\n"
-			     _ASM_EXTABLE(1b, 2b));
-		__thread_fpu_end(tsk);
-	}
-}
-
 /*
  * Need to be preemption-safe.
  *
@@ -431,22 +458,16 @@ static inline void user_fpu_begin(void)
 static inline void save_init_fpu(struct task_struct *tsk)
 {
 	WARN_ON_ONCE(!__thread_has_fpu(tsk));
+
+	if (use_xsave()) {
+		xsave_state(&tsk->thread.fpu.state->xsave, -1);
+		return;
+	}
+
 	preempt_disable();
 	__save_init_fpu(tsk);
 	__thread_fpu_end(tsk);
 	preempt_enable();
 }
 
-static inline void drop_fpu(struct task_struct *tsk)
-{
-	/*
-	 * Forget coprocessor state..
-	 */
-	tsk->fpu_counter = 0;
-	preempt_disable();
-	__drop_fpu(tsk);
-	preempt_enable();
-	clear_used_math();
-}
-
 /*
@@ -503,12 +524,21 @@ static inline void fpu_free(struct fpu *fpu)
 	}
 }
 
-static inline void fpu_copy(struct fpu *dst, struct fpu *src)
+static inline void fpu_copy(struct task_struct *dst, struct task_struct *src)
 {
-	memcpy(dst->state, src->state, xstate_size);
+	if (use_xsave()) {
+		struct xsave_struct *xsave = &dst->thread.fpu.state->xsave;
+
+		memset(&xsave->xsave_hdr, 0, sizeof(struct xsave_hdr_struct));
+		xsave_state(xsave, -1);
+	} else {
+		struct fpu *dfpu = &dst->thread.fpu;
+		struct fpu *sfpu = &src->thread.fpu;
+
+		unlazy_fpu(src);
+		memcpy(dfpu->state, sfpu->state, xstate_size);
+	}
 }
 
-extern void fpu_finit(struct fpu *fpu);
-
 static inline unsigned long
 alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
arch/x86/include/asm/i387.h  +1 −0
@@ -19,6 +19,7 @@ struct pt_regs;
 struct user_i387_struct;
 
 extern int init_fpu(struct task_struct *child);
+extern void fpu_finit(struct fpu *fpu);
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
 extern void math_state_restore(void);
 
arch/x86/include/asm/xsave.h  +1 −0
@@ -34,6 +34,7 @@
 extern unsigned int xstate_size;
 extern u64 pcntxt_mask;
 extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
+extern struct xsave_struct *init_xstate_buf;
 
 extern void xsave_init(void);
 extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
arch/x86/kernel/cpu/bugs.c  +6 −1
@@ -165,10 +165,15 @@ void __init check_bugs(void)
 	print_cpu_info(&boot_cpu_data);
 #endif
 	check_config();
-	check_fpu();
 	check_hlt();
 	check_popad();
 	init_utsname()->machine[1] =
 		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
 	alternative_instructions();
+
+	/*
+	 * kernel_fpu_begin/end() in check_fpu() relies on the patched
+	 * alternative instructions.
+	 */
+	check_fpu();
 }
arch/x86/kernel/i387.c  +17 −3
@@ -22,7 +22,15 @@
 /*
  * Were we in an interrupt that interrupted kernel mode?
  *
- * We can do a kernel_fpu_begin/end() pair *ONLY* if that
+ * For now, on xsave platforms we will return interrupted
+ * kernel FPU as not-idle. TBD: As we use non-lazy FPU restore
+ * for xsave platforms, ideally we can change the return value
+ * to something like __thread_has_fpu(current). But we need to
+ * be careful of doing __thread_clear_has_fpu() before saving
+ * the FPU etc for supporting nested uses etc. For now, take
+ * the simple route!
+ *
+ * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
  * pair does nothing at all: the thread must not have fpu (so
  * that we don't try to save the FPU state), and TS must
  * be set (so that the clts/stts pair does nothing that is
@@ -30,6 +38,9 @@
  */
 static inline bool interrupted_kernel_fpu_idle(void)
 {
+	if (use_xsave())
+		return 0;
+
 	return !__thread_has_fpu(current) &&
 		(read_cr0() & X86_CR0_TS);
 }
@@ -73,7 +84,7 @@ void kernel_fpu_begin(void)
 		__save_init_fpu(me);
 		__thread_clear_has_fpu(me);
 		/* We do 'stts()' in kernel_fpu_end() */
-	} else {
+	} else if (!use_xsave()) {
 		this_cpu_write(fpu_owner_task, NULL);
 		clts();
 	}
@@ -82,6 +93,9 @@ EXPORT_SYMBOL(kernel_fpu_begin);
 
 void kernel_fpu_end(void)
 {
-	stts();
+	if (use_xsave())
+		math_state_restore();
+	else
+		stts();
 	preempt_enable();
 }
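
For context, a typical caller of the kernel_fpu_begin()/kernel_fpu_end()
API changed above (usage sketch, not part of this patch; the function name
is hypothetical). After this commit, kernel_fpu_end() on xsave hardware
restores the task's FPU state via math_state_restore() instead of setting
cr0.TS:

static void avx_checksum_sketch(const void *data, unsigned int len)
{
	(void)data; (void)len;

	kernel_fpu_begin();	/* save current FPU state, allow SIMD use */
	/* ... SSE/AVX instructions, e.g. the AVX checksum cited above ... */
	kernel_fpu_end();	/* xsave: math_state_restore(); else stts() */
}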