Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c7b228ad authored by Linus Torvalds
Browse files

Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 FPU updates from Ingo Molnar:
 "x86 FPU handling fixes, cleanups and enhancements from Oleg.

  The signal handling race fix and the __restore_xstate_sig() preemption
  fix for eager-mode is marked for -stable as well"

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86: copy_thread: Don't nullify ->ptrace_bps twice
  x86, fpu: Shift "fpu_counter = 0" from copy_thread() to arch_dup_task_struct()
  x86, fpu: copy_process: Sanitize fpu->last_cpu initialization
  x86, fpu: copy_process: Avoid fpu_alloc/copy if !used_math()
  x86, fpu: Change __thread_fpu_begin() to use use_eager_fpu()
  x86, fpu: __restore_xstate_sig()->math_state_restore() needs preempt_disable()
  x86, fpu: shift drop_init_fpu() from save_xstate_sig() to handle_signal()
parents 708d0b41 6f46b3ae
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -344,7 +344,7 @@ static inline void __thread_fpu_end(struct task_struct *tsk)


static inline void __thread_fpu_begin(struct task_struct *tsk)
static inline void __thread_fpu_begin(struct task_struct *tsk)
{
{
	if (!static_cpu_has_safe(X86_FEATURE_EAGER_FPU))
	if (!use_eager_fpu())
		clts();
		clts();
	__thread_set_has_fpu(tsk);
	__thread_set_has_fpu(tsk);
}
}
+9 −7
Original line number Original line Diff line number Diff line
@@ -64,14 +64,16 @@ EXPORT_SYMBOL_GPL(task_xstate_cachep);
 */
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
{
	int ret;

	*dst = *src;
	*dst = *src;
	if (fpu_allocated(&src->thread.fpu)) {

		memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
	dst->thread.fpu_counter = 0;
		ret = fpu_alloc(&dst->thread.fpu);
	dst->thread.fpu.has_fpu = 0;
		if (ret)
	dst->thread.fpu.last_cpu = ~0;
			return ret;
	dst->thread.fpu.state = NULL;
	if (tsk_used_math(src)) {
		int err = fpu_alloc(&dst->thread.fpu);
		if (err)
			return err;
		fpu_copy(dst, src);
		fpu_copy(dst, src);
	}
	}
	return 0;
	return 0;
+1 −5
Original line number Original line Diff line number Diff line
@@ -138,6 +138,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,


	p->thread.sp = (unsigned long) childregs;
	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.sp0 = (unsigned long) (childregs+1);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));


	if (unlikely(p->flags & PF_KTHREAD)) {
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		/* kernel thread */
@@ -152,9 +153,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
		childregs->orig_ax = -1;
		childregs->orig_ax = -1;
		childregs->cs = __KERNEL_CS | get_kernel_rpl();
		childregs->cs = __KERNEL_CS | get_kernel_rpl();
		childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
		childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
		p->thread.fpu_counter = 0;
		p->thread.io_bitmap_ptr = NULL;
		p->thread.io_bitmap_ptr = NULL;
		memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
		return 0;
		return 0;
	}
	}
	*childregs = *current_pt_regs();
	*childregs = *current_pt_regs();
@@ -165,13 +164,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
	p->thread.ip = (unsigned long) ret_from_fork;
	p->thread.ip = (unsigned long) ret_from_fork;
	task_user_gs(p) = get_user_gs(current_pt_regs());
	task_user_gs(p) = get_user_gs(current_pt_regs());


	p->thread.fpu_counter = 0;
	p->thread.io_bitmap_ptr = NULL;
	p->thread.io_bitmap_ptr = NULL;
	tsk = current;
	tsk = current;
	err = -ENOMEM;
	err = -ENOMEM;


	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						IO_BITMAP_BYTES, GFP_KERNEL);
						IO_BITMAP_BYTES, GFP_KERNEL);
+0 −3
Original line number Original line Diff line number Diff line
@@ -163,7 +163,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
	p->thread.sp = (unsigned long) childregs;
	p->thread.sp = (unsigned long) childregs;
	p->thread.usersp = me->thread.usersp;
	p->thread.usersp = me->thread.usersp;
	set_tsk_thread_flag(p, TIF_FORK);
	set_tsk_thread_flag(p, TIF_FORK);
	p->thread.fpu_counter = 0;
	p->thread.io_bitmap_ptr = NULL;
	p->thread.io_bitmap_ptr = NULL;


	savesegment(gs, p->thread.gsindex);
	savesegment(gs, p->thread.gsindex);
@@ -193,8 +192,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
		childregs->sp = sp;
		childregs->sp = sp;


	err = -ENOMEM;
	err = -ENOMEM;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
						  IO_BITMAP_BYTES, GFP_KERNEL);
+5 −0
Original line number Original line Diff line number Diff line
@@ -675,6 +675,11 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
		 * handler too.
		 * handler too.
		 */
		 */
		regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF);
		regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF);
		/*
		 * Ensure the signal handler starts with the new fpu state.
		 */
		if (used_math())
			drop_init_fpu(current);
	}
	}
	signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP));
	signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP));
}
}
Loading