
Commit c7e49c14 authored by Oleg Nesterov, committed by Linus Torvalds

ptrace: optimize exit_ptrace() for the likely case



exit_ptrace() takes tasklist_lock unconditionally.  We need this lock to
avoid the race with ptrace_traceme(); it acts as a barrier.
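
For context, ptrace_traceme() performs the attachment under the same
write lock, which is what makes holding tasklist_lock a barrier here.  A
simplified sketch of its shape (error handling and security hooks
elided; not the verbatim source):

/*
 * Simplified sketch of ptrace_traceme(): the child links itself to its
 * real parent only while holding tasklist_lock for writing, so it
 * cannot interleave with an exiting parent's exit_ptrace() pass, which
 * runs under the same lock.
 */
int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	if (!current->ptrace) {
		current->ptrace = PT_PTRACED;
		__ptrace_link(current, current->real_parent);
		ret = 0;
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}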

Change its caller, forget_original_parent(), to call exit_ptrace() under
tasklist_lock.  Change exit_ptrace() to drop and reacquire this lock if
needed.

This allows us to add the fastpath list_empty(ptraced) check.  In the
likely no-tracees case, exit_ptrace() just returns and we avoid the
lock() + unlock() sequence entirely.
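
Condensed, the caller's shape after the patch looks like this (a
hypothetical condensation for illustration only; the real
forget_original_parent() also reparents the children, see the
kernel/exit.c hunk below):

/*
 * Hypothetical condensation of forget_original_parent() after this
 * patch, showing only the locking shape: a single lock/unlock pair now
 * covers both the (usually empty) ptrace pass and the reparenting.
 */
static void forget_original_parent_shape(struct task_struct *father)
{
	struct task_struct *reaper;

	write_lock_irq(&tasklist_lock);
	exit_ptrace(father);	/* fastpath: returns at once, lock still held */
	reaper = find_new_reaper(father);
	/* ... reparent father's children to reaper, collect the dead ... */
	write_unlock_irq(&tasklist_lock);
}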

"Zhang, Yanmin" <yanmin_zhang@linux.intel.com> suggested to add this
check, and he reports that this change adds about 11% improvement in some
tests.

Suggested-and-tested-by: "Zhang, Yanmin" <yanmin_zhang@linux.intel.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 13d7e3a2
kernel/exit.c +5 −2

@@ -771,9 +771,12 @@ static void forget_original_parent(struct task_struct *father)
 	struct task_struct *p, *n, *reaper;
 	LIST_HEAD(dead_children);
 
-	exit_ptrace(father);
-
 	write_lock_irq(&tasklist_lock);
+	/*
+	 * Note that exit_ptrace() and find_new_reaper() might
+	 * drop tasklist_lock and reacquire it.
+	 */
+	exit_ptrace(father);
 	reaper = find_new_reaper(father);
 
 	list_for_each_entry_safe(p, n, &father->children, sibling) {
kernel/ptrace.c +9 −3

@@ -324,26 +324,32 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
 }
 
 /*
- * Detach all tasks we were using ptrace on.
+ * Detach all tasks we were using ptrace on. Called with tasklist held
+ * for writing, and returns with it held too. But note it can release
+ * and reacquire the lock.
  */
 void exit_ptrace(struct task_struct *tracer)
 {
 	struct task_struct *p, *n;
 	LIST_HEAD(ptrace_dead);
 
-	write_lock_irq(&tasklist_lock);
+	if (likely(list_empty(&tracer->ptraced)))
+		return;
+
 	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
 		if (__ptrace_detach(tracer, p))
 			list_add(&p->ptrace_entry, &ptrace_dead);
 	}
-	write_unlock_irq(&tasklist_lock);
 
+	write_unlock_irq(&tasklist_lock);
 	BUG_ON(!list_empty(&tracer->ptraced));
 
 	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
 		list_del_init(&p->ptrace_entry);
 		release_task(p);
 	}
+
+	write_lock_irq(&tasklist_lock);
 }
 
 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
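
For reference, exit_ptrace() as it reads after this patch, assembled
from the hunk above (the locking comments are editorial, not from the
source):

void exit_ptrace(struct task_struct *tracer)
{
	struct task_struct *p, *n;
	LIST_HEAD(ptrace_dead);

	/* Fastpath: no tracees, the caller's lock alone suffices. */
	if (likely(list_empty(&tracer->ptraced)))
		return;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, &ptrace_dead);
	}

	/* Drop the lock: release_task() takes tasklist_lock itself. */
	write_unlock_irq(&tasklist_lock);
	BUG_ON(!list_empty(&tracer->ptraced));

	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	/* Reacquire before returning; the caller still expects it held. */
	write_lock_irq(&tasklist_lock);
}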