
Commit 4369ef3c authored by Paul E. McKenney, committed by Linus Torvalds

[PATCH] Make RCU task_struct safe for oprofile



Applying RCU to the task structure broke oprofile, because
task_free_notify() can now be called from softirq context.  This means
that the task_mortuary lock must be acquired with irqs disabled in order
to avoid intermittent self-deadlock.  Since irqs are now disabled, the
critical section within process_task_mortuary() has been restructured to
be O(1) in order to maximize scalability and minimize realtime latency
degradation.
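To make the deadlock concrete: plain spin_lock() leaves irqs enabled, so
the RCU softirq can fire on a CPU that already holds task_mortuary and
then spin on the same lock forever.  An illustrative sketch of the
interleaving (not part of the patch):

	/* CPU 0, process context (buffer sync): */
	spin_lock(&task_mortuary);
	/* ... RCU softirq fires on this CPU before the unlock ... */

	/* CPU 0, softirq context, task_free_notify(): */
	spin_lock(&task_mortuary);	/* self-deadlock: the lock holder is
					 * the interrupted context and can
					 * never run again to release it */

The fix is the _irqsave variant, which keeps softirqs off the CPU for
the duration of the critical section:

	unsigned long flags;

	spin_lock_irqsave(&task_mortuary, flags);
	list_add(&task->tasks, &dying_tasks);
	spin_unlock_irqrestore(&task_mortuary, flags);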

Kudos to Wu Fengguang for finding this problem!

Cc: Wu Fengguang <wfg@mail.ustc.edu.cn>
Cc: Philippe Elie <phil.el@wanadoo.fr>
Cc: John Levon <levon@movementarian.org>
Signed-off-by: "Paul E. McKenney" <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent eafbaa94
drivers/oprofile/buffer_sync.c  +15 −15
@@ -43,13 +43,16 @@ static void process_task_mortuary(void);
  * list for processing. Only after two full buffer syncs
  * does the task eventually get freed, because by then
  * we are sure we will not reference it again.
+ * Can be invoked from softirq via RCU callback due to
+ * call_rcu() of the task struct, hence the _irqsave.
  */
 static int task_free_notify(struct notifier_block * self, unsigned long val, void * data)
 {
+	unsigned long flags;
 	struct task_struct * task = data;
-	spin_lock(&task_mortuary);
+	spin_lock_irqsave(&task_mortuary, flags);
 	list_add(&task->tasks, &dying_tasks);
-	spin_unlock(&task_mortuary);
+	spin_unlock_irqrestore(&task_mortuary, flags);
 	return NOTIFY_OK;
 }

@@ -431,25 +434,22 @@ static void increment_tail(struct oprofile_cpu_buffer * b)
  */
 static void process_task_mortuary(void)
 {
-	struct list_head * pos;
-	struct list_head * pos2;
+	unsigned long flags;
+	LIST_HEAD(local_dead_tasks);
 	struct task_struct * task;
+	struct task_struct * ttask;
 
-	spin_lock(&task_mortuary);
+	spin_lock_irqsave(&task_mortuary, flags);
 
-	list_for_each_safe(pos, pos2, &dead_tasks) {
-		task = list_entry(pos, struct task_struct, tasks);
-		list_del(&task->tasks);
-		free_task(task);
-	}
+	list_splice_init(&dead_tasks, &local_dead_tasks);
+	list_splice_init(&dying_tasks, &dead_tasks);
 
-	list_for_each_safe(pos, pos2, &dying_tasks) {
-		task = list_entry(pos, struct task_struct, tasks);
-		list_del(&task->tasks);
-		list_add_tail(&task->tasks, &dead_tasks);
-	}
+	spin_unlock_irqrestore(&task_mortuary, flags);
 
-	spin_unlock(&task_mortuary);
+	list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
+		list_del(&task->tasks);
+		free_task(task);
+	}
 }
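
For readability, here is the post-patch process_task_mortuary() assembled
from the + lines of the hunk above, with comments (added here, not in the
patch) marking why the irqs-off critical section is now O(1):

	static void process_task_mortuary(void)
	{
		unsigned long flags;
		LIST_HEAD(local_dead_tasks);
		struct task_struct * task;
		struct task_struct * ttask;

		spin_lock_irqsave(&task_mortuary, flags);

		/* O(1): detach the tasks that are now safe to free ... */
		list_splice_init(&dead_tasks, &local_dead_tasks);
		/* ... and age dying_tasks into dead_tasks for the next sync. */
		list_splice_init(&dying_tasks, &dead_tasks);

		spin_unlock_irqrestore(&task_mortuary, flags);

		/* The O(n) freeing runs with irqs enabled again, on a private
		 * list that neither the softirq nor other CPUs can reach. */
		list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
			list_del(&task->tasks);
			free_task(task);
		}
	}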