Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ed3e694d authored by Al Viro
Browse files

move exit_task_work() past exit_files() et.al.



... and get rid of PF_EXITING check in task_work_add().

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 67d12145
Loading
Loading
Loading
Loading
+2 −4
Original line number Diff line number Diff line
@@ -953,14 +953,11 @@ void do_exit(long code)
	exit_signals(tsk);  /* sets PF_EXITING */
	/*
	 * tsk->flags are checked in the futex code to protect against
	 * an exiting task cleaning up the robust pi futexes, and in
	 * task_work_add() to avoid the race with exit_task_work().
	 * an exiting task cleaning up the robust pi futexes.
	 */
	smp_mb();
	raw_spin_unlock_wait(&tsk->pi_lock);

	exit_task_work(tsk);

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, task_pid_nr(current),
@@ -995,6 +992,7 @@ void do_exit(long code)
	exit_shm(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	exit_task_work(tsk);
	check_stack_usage();
	exit_thread();

+11 −19
Original line number Diff line number Diff line
@@ -5,34 +5,26 @@
int
task_work_add(struct task_struct *task, struct callback_head *twork, bool notify)
{
	struct callback_head *last, *first;
	unsigned long flags;
	int err = -ESRCH;

#ifndef TIF_NOTIFY_RESUME
	if (notify)
		return -ENOTSUPP;
#endif
	/*
	 * We must not insert the new work if the task has already passed
	 * exit_task_work(). We rely on do_exit()->raw_spin_unlock_wait()
	 * and check PF_EXITING under pi_lock.
	 * Not inserting the new work if the task has already passed
exit_task_work() is the responsibility of callers.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	if (likely(!(task->flags & PF_EXITING))) {
		struct callback_head *last = task->task_works;
		struct callback_head *first = last ? last->next : twork;
	last = task->task_works;
	first = last ? last->next : twork;
	twork->next = first;
	if (last)
		last->next = twork;
	task->task_works = twork;
		err = 0;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* test_and_set_bit() implies mb(), see tracehook_notify_resume(). */
	if (likely(!err) && notify)
	if (notify)
		set_notify_resume(task);
	return err;
	return 0;
}

struct callback_head *