
Commit 991ec02c authored by Linus Torvalds

Merge branch 'tracing-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  function-graph: always initialize task ret_stack
  function-graph: move initialization of new tasks up in fork
  function-graph: add memory barriers for accessing task's ret_stack
  function-graph: enable the stack after initialization of other variables
  function-graph: only allocate init tasks if it was not already done

Manually fix trivial conflict in kernel/trace/ftrace.c
parents 86236611 84047e36
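
At the core of the series is a publish/consume pattern on the task's ret_stack: the writer fully initializes the per-task state, issues smp_wmb(), and only then stores the ret_stack pointer, while the reader tests the pointer, issues smp_rmb(), and only then trusts curr_ret_stack and the other fields. A minimal standalone sketch of that pairing follows; struct task_state and the function names are invented for illustration, and the barrier macros are approximated with compiler builtins rather than the kernel's own definitions.

/*
 * Illustrative sketch only -- not kernel code. smp_wmb()/smp_rmb() are
 * approximated with GCC/Clang fences; task_state is a miniature task_struct.
 */
#define smp_wmb()	__atomic_thread_fence(__ATOMIC_RELEASE)
#define smp_rmb()	__atomic_thread_fence(__ATOMIC_ACQUIRE)

struct task_state {
	int	curr_ret_stack;
	void	*ret_stack;		/* stays NULL until fully initialized */
};

/* Writer side (fork / register path): initialize first, publish the pointer last. */
static void publish_ret_stack(struct task_state *t, void *stack)
{
	t->curr_ret_stack = -1;
	smp_wmb();			/* order the stores above ... */
	t->ret_stack = stack;		/* ... before the store that makes them reachable */
}

/* Reader side (trace entry): test the pointer first, only then read the rest. */
static int read_curr_depth(struct task_state *t)
{
	if (!t->ret_stack)
		return -1;		/* tracing not set up for this task (the kernel returns -EBUSY) */
	smp_rmb();			/* pairs with smp_wmb() in the writer */
	return t->curr_ret_stack;
}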
kernel/fork.c  +4 −6
@@ -981,6 +981,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
	if (!p)
		goto fork_out;

+	ftrace_graph_init_task(p);
+
	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
@@ -1130,8 +1132,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
		}
	}

-	ftrace_graph_init_task(p);
-
	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
@@ -1140,7 +1140,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
	if (current->nsproxy != p->nsproxy) {
		retval = ns_cgroup_clone(p, pid);
		if (retval)
-			goto bad_fork_free_graph;
+			goto bad_fork_free_pid;
	}

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
@@ -1232,7 +1232,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
-		goto bad_fork_free_graph;
+		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
@@ -1267,8 +1267,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
	cgroup_post_fork(p);
	return p;

-bad_fork_free_graph:
-	ftrace_graph_exit_task(p);
bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
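
For context on the kernel/fork.c hunks above: dup_task_struct() hands copy_process() a near byte-for-byte copy of the parent, so until ftrace_graph_init_task() runs, the child's ret_stack still points at the parent's live buffer, and an early fork failure could end up freeing the parent's stack. Doing the init immediately after the task is allocated closes that window and lets the dedicated bad_fork_free_graph unwind label (and its ftrace_graph_exit_task() call) go away. A stripped-down userspace sketch of the idea; struct task_like and dup_task_like() are invented names, not kernel APIs.

#include <stdlib.h>
#include <string.h>

/* Miniature stand-in for task_struct; only the field that matters here. */
struct task_like {
	void	*ret_stack;
};

/*
 * dup_task_struct()-style copy: the child begins life aliasing the parent's
 * ret_stack, which is why the reset below must happen before any code path
 * that might free the child's stack on error.
 */
static struct task_like *dup_task_like(const struct task_like *parent)
{
	struct task_like *child = malloc(sizeof(*child));

	if (!child)
		return NULL;
	memcpy(child, parent, sizeof(*child));

	/* What ftrace_graph_init_task() does first: disown the parent's buffer. */
	child->ret_stack = NULL;
	return child;
}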
kernel/trace/ftrace.c  +19 −10
@@ -3218,12 +3218,12 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
		}

		if (t->ret_stack == NULL) {
-			t->curr_ret_stack = -1;
-			/* Make sure IRQs see the -1 first: */
-			barrier();
-			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
+			t->curr_ret_stack = -1;
+			/* Make sure the tasks see the -1 first: */
+			smp_wmb();
+			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

@@ -3281,8 +3281,10 @@ static int start_graph_tracing(void)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
-	for_each_online_cpu(cpu)
-		ftrace_graph_init_task(idle_task(cpu));
+	for_each_online_cpu(cpu) {
+		if (!idle_task(cpu)->ret_stack)
+			ftrace_graph_init_task(idle_task(cpu));
+	}

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
@@ -3374,18 +3376,25 @@ void unregister_ftrace_graph(void)
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
+	/* Make sure we do not use the parent ret_stack */
+	t->ret_stack = NULL;
+
	if (ftrace_graph_active) {
-		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+		struct ftrace_ret_stack *ret_stack;
+
+		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
-		if (!t->ret_stack)
+		if (!ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
		t->ftrace_timestamp = 0;
-	} else
-		t->ret_stack = NULL;
+		/* make curr_ret_stack visable before we add the ret_stack */
+		smp_wmb();
+		t->ret_stack = ret_stack;
+	}
}

void ftrace_graph_exit_task(struct task_struct *t)
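
The start_graph_tracing() hunk above also makes idle-task setup idempotent: the boot/idle ret_stacks are never freed, so re-registering the tracer must not hand a task that already owns one a second allocation, and ftrace_graph_init_task() now publishes a newly allocated stack only after curr_ret_stack and the counters are initialized. A small sketch of that guard-plus-publish ordering; the names mirror the earlier sketch and allocate_ret_stack() is a hypothetical stand-in for the kmalloc() call in the patch.

#include <stdlib.h>

#define smp_wmb()	__atomic_thread_fence(__ATOMIC_RELEASE)

struct task_state {
	int	curr_ret_stack;
	void	*ret_stack;
};

/* Hypothetical allocator standing in for the kmalloc() in the patch. */
static void *allocate_ret_stack(void)
{
	return malloc(4096);
}

/* Idempotent lazy init: skip tasks that were already set up earlier. */
static void init_ret_stack_once(struct task_state *t)
{
	void *stack;

	if (t->ret_stack)
		return;			/* already initialized; never freed, so don't leak a second one */

	stack = allocate_ret_stack();
	if (!stack)
		return;
	t->curr_ret_stack = -1;
	smp_wmb();			/* initialize fields before publishing the pointer */
	t->ret_stack = stack;
}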
kernel/trace/trace_functions_graph.c  +6 −0
@@ -65,6 +65,12 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
	if (!current->ret_stack)
		return -EBUSY;

+	/*
+	 * We must make sure the ret_stack is tested before we read
+	 * anything else.
+	 */
+	smp_rmb();
+
	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
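
This smp_rmb() is the reader half of the pairing: ftrace_push_return_trace() reads curr_ret_stack and the other per-task fields only after it has seen a non-NULL ret_stack, and the barrier keeps those later reads from being satisfied with values older than the writer's smp_wmb(). Without it, on a weakly ordered CPU the first trace hit on a freshly initialized task could see ret_stack already set but curr_ret_stack still stale.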