
Commit bf1af3a8 authored by Ingo Molnar

Merge branch 'tip/perf/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
parents 0de4b34d 868baf07
include/linux/ftrace.h +2 −0
@@ -428,6 +428,7 @@ extern void unregister_ftrace_graph(void);
 
 extern void ftrace_graph_init_task(struct task_struct *t);
 extern void ftrace_graph_exit_task(struct task_struct *t);
+extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
 
 static inline int task_curr_ret_stack(struct task_struct *t)
 {
@@ -451,6 +452,7 @@ static inline void unpause_graph_tracing(void)
 
 static inline void ftrace_graph_init_task(struct task_struct *t) { }
 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
+static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
 
 static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 			  trace_func_graph_ent_t entryfunc)
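The header hunks follow the kernel's usual config-stub pattern: a real prototype in the CONFIG_FUNCTION_GRAPH_TRACER section and an empty static inline in the else branch, so call sites such as init_idle() below compile without any #ifdef. A minimal sketch of that pattern (CONFIG_FOO and foo_init() are hypothetical stand-ins, not part of this commit):

#ifdef CONFIG_FOO
/* real implementation lives in a .c file when the feature is built in */
extern void foo_init(struct task_struct *t, int cpu);
#else
/* empty stub: the compiler inlines it and drops the call entirely */
static inline void foo_init(struct task_struct *t, int cpu) { }
#endif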
kernel/sched.c +1 −1
@@ -5550,7 +5550,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	 * The idle tasks have their own, simple scheduling class:
 	 */
 	idle->sched_class = &idle_sched_class;
-	ftrace_graph_init_task(idle);
+	ftrace_graph_init_idle_task(idle, cpu);
 }
 
 /*
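init_idle() runs not only at boot but also each time a CPU is brought back online by hotplug, so the old ftrace_graph_init_task() call allocated a fresh ret_stack on every online event and leaked the previous one; the new helper reuses a per-CPU buffer instead. A user-space model of that allocate-once-then-reuse idea (NCPUS, idle_stack_cache and online_cpu() are illustrative names, not kernel API):

#include <stdio.h>
#include <stdlib.h>

#define NCPUS 4

/* models DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack) */
static void *idle_stack_cache[NCPUS];

/* models ftrace_graph_init_idle_task(): allocate once, then reuse */
static void *online_cpu(int cpu)
{
	if (!idle_stack_cache[cpu])
		idle_stack_cache[cpu] = malloc(4096);
	return idle_stack_cache[cpu];
}

int main(void)
{
	/* re-onlining CPU 1 hands back the cached buffer, no leak */
	void *a = online_cpu(1);
	void *b = online_cpu(1);
	printf("buffer reused: %s\n", a == b ? "yes" : "no");
	return 0;
}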
kernel/trace/ftrace.c +45 −7
@@ -3328,7 +3328,7 @@ static int start_graph_tracing(void)
 	/* The cpu_boot init_task->ret_stack will never be freed */
 	for_each_online_cpu(cpu) {
 		if (!idle_task(cpu)->ret_stack)
-			ftrace_graph_init_task(idle_task(cpu));
+			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
 	}
 
 	do {
@@ -3418,6 +3418,49 @@ void unregister_ftrace_graph(void)
 	mutex_unlock(&ftrace_lock);
 }
 
+static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
+
+static void
+graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
+{
+	atomic_set(&t->tracing_graph_pause, 0);
+	atomic_set(&t->trace_overrun, 0);
+	t->ftrace_timestamp = 0;
+	/* make curr_ret_stack visable before we add the ret_stack */
+	smp_wmb();
+	t->ret_stack = ret_stack;
+}
+
+/*
+ * Allocate a return stack for the idle task. May be the first
+ * time through, or it may be done by CPU hotplug online.
+ */
+void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
+{
+	t->curr_ret_stack = -1;
+	/*
+	 * The idle task has no parent, it either has its own
+	 * stack or no stack at all.
+	 */
+	if (t->ret_stack)
+		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
+
+	if (ftrace_graph_active) {
+		struct ftrace_ret_stack *ret_stack;
+
+		ret_stack = per_cpu(idle_ret_stack, cpu);
+		if (!ret_stack) {
+			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+					    * sizeof(struct ftrace_ret_stack),
+					    GFP_KERNEL);
+			if (!ret_stack)
+				return;
+			per_cpu(idle_ret_stack, cpu) = ret_stack;
+		}
+		graph_init_task(t, ret_stack);
+	}
+}
+
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
@@ -3433,12 +3476,7 @@ void ftrace_graph_init_task(struct task_struct *t)
 				GFP_KERNEL);
 		if (!ret_stack)
 			return;
-		atomic_set(&t->tracing_graph_pause, 0);
-		atomic_set(&t->trace_overrun, 0);
-		t->ftrace_timestamp = 0;
-		/* make curr_ret_stack visable before we add the ret_stack */
-		smp_wmb();
-		t->ret_stack = ret_stack;
+		graph_init_task(t, ret_stack);
 	}
 }
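The new graph_init_task() helper preserves the ordering the open-coded sequence relied on: every per-task field is initialized first, then smp_wmb() orders those stores before the final assignment of t->ret_stack, so any observer that sees a non-NULL ret_stack also sees fully initialized state. A compile-clean sketch of that publish pattern (the struct and function names are illustrative, and __sync_synchronize(), a full barrier, merely stands in for smp_wmb()):

struct task_model {
	int   curr_ret_stack;	/* private state, written first */
	void *ret_stack;	/* published last */
};

void publish_stack(struct task_model *t, void *stack)
{
	t->curr_ret_stack = -1;	/* initialize everything first */
	__sync_synchronize();	/* order the init before the publish */
	t->ret_stack = stack;	/* readers key off this pointer */
}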