
Commit aa283f49 authored by Suresh Siddha, committed by Ingo Molnar

x86, fpu: lazy allocation of FPU area - v5

Only allocate the FPU area when the application actually uses the FPU, i.e., in
the first lazy FPU trap. This can save memory for apps that do not use the FPU.

For example: on my system after boot, there are around 300 processes, with
only 17 using the FPU.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 61c4628b
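
The core idea is plain demand allocation: leave the per-task FPU save area
unallocated until the task first touches the FPU, then allocate it in the
first device-not-available trap. A rough user-space sketch of the pattern
(all types and names below are hypothetical stand-ins, not the kernel code):

#include <stdlib.h>

/* Hypothetical stand-ins for the kernel's per-task state. */
struct xstate { unsigned char save_area[512]; };

struct task {
	struct xstate *xstate;	/* NULL until the task first uses the FPU */
	int used_math;
};

/* Allocate the save area on first use; report failure to the caller. */
static int init_fpu_sketch(struct task *tsk)
{
	if (tsk->used_math)
		return 0;		/* state already set up */

	if (!tsk->xstate) {
		tsk->xstate = calloc(1, sizeof(*tsk->xstate));
		if (!tsk->xstate)
			return -1;	/* out of memory */
	}

	tsk->used_math = 1;		/* remember the FPU is in use now */
	return 0;
}

int main(void)
{
	struct task t = { 0 };
	return init_fpu_sketch(&t);	/* first FPU use triggers the allocation */
}

With roughly 300 processes and only 17 FPU users, as in the message above,
about 280 save areas are simply never allocated.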
arch/x86/kernel/i387.c (+30 −8)
@@ -8,7 +8,6 @@
 #include <linux/module.h>
 #include <linux/regset.h>
 #include <linux/sched.h>
-#include <linux/bootmem.h>
 
 #include <asm/sigcontext.h>
 #include <asm/processor.h>
@@ -63,7 +62,6 @@ void __init init_thread_xstate(void)
 	else
 		xstate_size = sizeof(struct i387_fsave_struct);
 #endif
-	init_task.thread.xstate = alloc_bootmem(xstate_size);
 }
 
 #ifdef CONFIG_X86_64
@@ -93,12 +91,22 @@ void __cpuinit fpu_init(void)
  * value at reset if we support XMM instructions and then
  * remeber the current task has used the FPU.
  */
-void init_fpu(struct task_struct *tsk)
+int init_fpu(struct task_struct *tsk)
 {
 	if (tsk_used_math(tsk)) {
 		if (tsk == current)
 			unlazy_fpu(tsk);
-		return;
+		return 0;
+	}
+
+	/*
+	 * Memory allocation at the first usage of the FPU and other state.
+	 */
+	if (!tsk->thread.xstate) {
+		tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
+						      GFP_KERNEL);
+		if (!tsk->thread.xstate)
+			return -ENOMEM;
 	}
 
 	if (cpu_has_fxsr) {
@@ -120,6 +128,7 @@ void init_fpu(struct task_struct *tsk)
 	 * Only the device not available exception or ptrace can call init_fpu.
 	 */
 	set_stopped_child_used_math(tsk);
+	return 0;
 }
 
 int fpregs_active(struct task_struct *target, const struct user_regset *regset)
@@ -136,10 +145,14 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
 		unsigned int pos, unsigned int count,
 		void *kbuf, void __user *ubuf)
 {
+	int ret;
+
 	if (!cpu_has_fxsr)
 		return -ENODEV;
 
-	init_fpu(target);
+	ret = init_fpu(target);
+	if (ret)
+		return ret;
 
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 				   &target->thread.xstate->fxsave, 0, -1);
@@ -154,7 +167,10 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
 	if (!cpu_has_fxsr)
 		return -ENODEV;
 
-	init_fpu(target);
+	ret = init_fpu(target);
+	if (ret)
+		return ret;
+
 	set_stopped_child_used_math(target);
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
@@ -312,11 +328,14 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
 	       void *kbuf, void __user *ubuf)
 {
 	struct user_i387_ia32_struct env;
+	int ret;
 
 	if (!HAVE_HWFP)
 		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
 
-	init_fpu(target);
+	ret = init_fpu(target);
+	if (ret)
+		return ret;
 
 	if (!cpu_has_fxsr) {
 		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
@@ -344,7 +363,10 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 	if (!HAVE_HWFP)
 		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
 
-	init_fpu(target);
+	ret = init_fpu(target);
+	if (ret)
+		return ret;
+
 	set_stopped_child_used_math(target);
 
 	if (!cpu_has_fxsr) {
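
All four regset accessors above make the same conversion: init_fpu() now
returns an int, and a failed lazy allocation is propagated to the ptrace
caller as -ENOMEM instead of being silently ignored and followed by a NULL
thread.xstate dereference. Continuing the hypothetical sketch above, the
caller-side pattern is:

/* Hypothetical caller-side sketch of the checked-init pattern. */
static int regset_op_sketch(struct task *target)
{
	int ret = init_fpu_sketch(target);	/* may allocate, may fail */

	if (ret)
		return ret;	/* bail out before touching target->xstate */
	/* ... target->xstate is guaranteed valid here ... */
	return 0;
}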
arch/x86/kernel/process.c (+19 −9)
@@ -5,24 +5,34 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 
-static struct kmem_cache *task_xstate_cachep;
+struct kmem_cache *task_xstate_cachep;
 
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
 	*dst = *src;
-	dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
-	if (!dst->thread.xstate)
-		return -ENOMEM;
-	WARN_ON((unsigned long)dst->thread.xstate & 15);
-	memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
+	if (src->thread.xstate) {
+		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
+						      GFP_KERNEL);
+		if (!dst->thread.xstate)
+			return -ENOMEM;
+		WARN_ON((unsigned long)dst->thread.xstate & 15);
+		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
+	}
 	return 0;
 }
 
-void free_thread_info(struct thread_info *ti)
+void free_thread_xstate(struct task_struct *tsk)
 {
-	kmem_cache_free(task_xstate_cachep, ti->task->thread.xstate);
-	ti->task->thread.xstate = NULL;
+	if (tsk->thread.xstate) {
+		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
+		tsk->thread.xstate = NULL;
+	}
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+	free_thread_xstate(ti->task);
 	free_pages((unsigned long)(ti), get_order(THREAD_SIZE));
 }
 
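fork keeps the laziness: arch_dup_task_struct() allocates and copies a save
area only when the parent actually has one, so a tree of processes that never
touch the FPU never pays for the state. In the same hypothetical user-space
sketch (together with the earlier sketch's definitions):

#include <string.h>	/* memcpy */

/* Copy FPU state across fork only if the parent ever used the FPU. */
static int dup_task_sketch(struct task *dst, const struct task *src)
{
	*dst = *src;		/* copies the xstate pointer too (NULL if unused) */
	if (!src->xstate)
		return 0;	/* parent never used the FPU: stay lazy */

	dst->xstate = malloc(sizeof(*dst->xstate));
	if (!dst->xstate)
		return -1;
	memcpy(dst->xstate, src->xstate, sizeof(*dst->xstate));
	return 0;
}

The two start_thread() hunks below are the other half of the lifecycle: on
exec the old state is freed, returning the task to the never-used-FPU state.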
arch/x86/kernel/process_32.c (+4 −0)
@@ -521,6 +521,10 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 	regs->cs		= __USER_CS;
 	regs->ip		= new_ip;
 	regs->sp		= new_sp;
+	/*
+	 * Free the old FP and other extended state
+	 */
+	free_thread_xstate(current);
 }
 EXPORT_SYMBOL_GPL(start_thread);
 
arch/x86/kernel/process_64.c (+4 −0)
@@ -533,6 +533,10 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 	regs->ss		= __USER_DS;
 	regs->flags		= 0x200;
 	set_fs(USER_DS);
+	/*
+	 * Free the old FP and other extended state
+	 */
+	free_thread_xstate(current);
 }
 EXPORT_SYMBOL_GPL(start_thread);
 
arch/x86/kernel/traps_32.c (+15 −2)
@@ -1148,9 +1148,22 @@ asmlinkage void math_state_restore(void)
 	struct thread_info *thread = current_thread_info();
 	struct task_struct *tsk = thread->task;
 
+	if (!tsk_used_math(tsk)) {
+		local_irq_enable();
+		/*
+		 * does a slab alloc which can sleep
+		 */
+		if (init_fpu(tsk)) {
+			/*
+			 * ran out of memory!
+			 */
+			do_group_exit(SIGKILL);
+			return;
+		}
+		local_irq_disable();
+	}
+
 	clts();				/* Allow maths ops (or we recurse) */
-	if (!tsk_used_math(tsk))
-		init_fpu(tsk);
 	restore_fpu(tsk);
 	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
 	tsk->fpu_counter++;
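
The trap path is the delicate part of the series: math_state_restore() runs
with interrupts disabled, but a GFP_KERNEL slab allocation may sleep, so the
handler briefly re-enables interrupts around init_fpu(). And because a trap
handler has no error return path to the faulting instruction, an allocation
failure leaves only one option: kill the process group. Continuing the
sketch, with hypothetical stubs for the kernel primitives:

/* Hypothetical stubs standing in for local_irq_*() and do_group_exit(). */
static void irq_enable_sketch(void) { }
static void irq_disable_sketch(void) { }
static void kill_current_group_sketch(void) { /* deliver SIGKILL */ }

static void math_state_restore_sketch(struct task *tsk)
{
	if (!tsk->used_math) {
		irq_enable_sketch();	/* the allocation below may sleep */
		if (init_fpu_sketch(tsk)) {
			kill_current_group_sketch();	/* OOM in a trap: no error path */
			return;
		}
		irq_disable_sketch();
	}
	/* ... restore the FPU registers from tsk->xstate ... */
}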